From 03f4cfb7cda62bf40e4a10f6f00ca161bb68a48e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Mar 2024 00:56:41 -0400 Subject: [PATCH 1/4] Replace more prints with logging. --- latent_preview.py | 3 ++- nodes.py | 29 +++++++++++++++-------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/latent_preview.py b/latent_preview.py index 61754751e..4dbcbf455 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -6,6 +6,7 @@ from comfy.cli_args import args, LatentPreviewMethod from comfy.taesd.taesd import TAESD import folder_paths import comfy.utils +import logging MAX_PREVIEW_RESOLUTION = 512 @@ -70,7 +71,7 @@ def get_previewer(device, latent_format): taesd = TAESD(None, taesd_decoder_path).to(device) previewer = TAESDPreviewerImpl(taesd) else: - print("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name)) + logging.warning("Warning: TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name)) if previewer is None: if latent_format.latent_rgb_factors is not None: diff --git a/nodes.py b/nodes.py index c334cdb95..ca14677d5 100644 --- a/nodes.py +++ b/nodes.py @@ -8,6 +8,7 @@ import traceback import math import time import random +import logging from PIL import Image, ImageOps, ImageSequence from PIL.PngImagePlugin import PngInfo @@ -83,7 +84,7 @@ class ConditioningAverage : out = [] if len(conditioning_from) > 1: - print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + logging.warning("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") cond_from = conditioning_from[0][0] pooled_output_from = conditioning_from[0][1].get("pooled_output", None) @@ -122,7 +123,7 @@ class ConditioningConcat: out = [] if len(conditioning_from) > 1: - print("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") + logging.warning("Warning: ConditioningConcat conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.") cond_from = conditioning_from[0][0] @@ -1899,11 +1900,11 @@ def load_custom_node(module_path, ignore=set()): NODE_DISPLAY_NAME_MAPPINGS.update(module.NODE_DISPLAY_NAME_MAPPINGS) return True else: - print(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") + logging.warning(f"Skip {module_path} module for custom nodes due to the lack of NODE_CLASS_MAPPINGS.") return False except Exception as e: - print(traceback.format_exc()) - print(f"Cannot import {module_path} module for custom nodes:", e) + logging.warning(traceback.format_exc()) + logging.warning(f"Cannot import {module_path} module for custom nodes:", e) return False def load_custom_nodes(): @@ -1924,14 +1925,14 @@ def load_custom_nodes(): node_import_times.append((time.perf_counter() - time_before, module_path, success)) if len(node_import_times) > 0: - print("\nImport times for custom nodes:") + logging.warning("\nImport times for custom nodes:") for n in sorted(node_import_times): if n[2]: import_message = "" else: import_message = " (IMPORT FAILED)" - print("{:6.1f} seconds{}:".format(n[0], import_message), n[1]) - print() + logging.warning("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) + logging.warning("") def init_custom_nodes(): extras_dir = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras") @@ -1973,12 +1974,12 @@ def init_custom_nodes(): load_custom_nodes() if len(import_failed) > 0: - print("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n") + logging.warning("WARNING: some comfy_extras/ nodes did not import correctly. This may be because they are missing some dependencies.\n") for node in import_failed: - print("IMPORT FAILED: {}".format(node)) - print("\nThis issue might be caused by missing newly dependencies added the last time you updated ComfyUI.") + logging.warning("IMPORT FAILED: {}".format(node)) + logging.warning("\nThis issue might be caused by new missing dependencies added the last time you updated ComfyUI.") if args.windows_standalone_build: - print("Please run the update script: update/update_comfyui.bat") + logging.warning("Please run the update script: update/update_comfyui.bat") else: - print("Please do a: pip install -r requirements.txt") - print() + logging.warning("Please do a: pip install -r requirements.txt") + logging.warning("") From dc6d4151a2dc411e69361e219af3a81863fc6dc4 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Mar 2024 12:30:11 -0400 Subject: [PATCH 2/4] Not needed anymore. --- server.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/server.py b/server.py index c6132cdf9..7b4f910e3 100644 --- a/server.py +++ b/server.py @@ -15,15 +15,8 @@ from PIL import Image, ImageOps from PIL.PngImagePlugin import PngInfo from io import BytesIO -try: - import aiohttp - from aiohttp import web -except ImportError: - print("Module 'aiohttp' not installed. Please install it via:") - print("pip install aiohttp") - print("or") - print("pip install -r requirements.txt") - sys.exit() +import aiohttp +from aiohttp import web import mimetypes from comfy.cli_args import args From 0ed72befe18e086ac160f1a55aa69b37c928ebb9 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Mar 2024 13:54:56 -0400 Subject: [PATCH 3/4] Change log levels. Logging level now defaults to info. --verbose sets it to debug. 
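
In practice the level selection in comfy/cli_args.py comes down to the few lines below. This is a sketch, not a copy of the file: the argparse stub and the basicConfig format string are assumptions, only the INFO/DEBUG selection is taken from the patch.

    import argparse
    import logging

    # Hypothetical stand-in for the relevant flag in comfy/cli_args.py's parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action="store_true", help="Enable more debug output.")
    args = parser.parse_args()

    logging_level = logging.INFO       # new default: INFO and above are shown
    if args.verbose:
        logging_level = logging.DEBUG  # --verbose additionally shows DEBUG messages

    # Assumed one-time setup of the root logger at the chosen level.
    logging.basicConfig(format="%(message)s", level=logging_level)

With this default, the messages downgraded to logging.debug() in this patch (unexpected controlnet keys, leftover VAE/CLIP keys, "Global Step", and similar) stay hidden unless --verbose is passed, while logging.info() keeps the startup and model-load output visible.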
--- comfy/cli_args.py | 2 +- comfy/controlnet.py | 4 ++-- comfy/diffusers_convert.py | 2 +- comfy/model_base.py | 4 ++-- comfy/model_management.py | 28 ++++++++++++++-------------- comfy/sd.py | 12 ++++++------ comfy/utils.py | 2 +- nodes.py | 6 +++--- server.py | 15 ++++++++------- 9 files changed, 38 insertions(+), 37 deletions(-) diff --git a/comfy/cli_args.py b/comfy/cli_args.py index 757fc245f..353bb51e7 100644 --- a/comfy/cli_args.py +++ b/comfy/cli_args.py @@ -129,7 +129,7 @@ if args.disable_auto_launch: args.auto_launch = False import logging -logging_level = logging.WARNING +logging_level = logging.INFO if args.verbose: logging_level = logging.DEBUG diff --git a/comfy/controlnet.py b/comfy/controlnet.py index a5e7b23f7..1a72412b1 100644 --- a/comfy/controlnet.py +++ b/comfy/controlnet.py @@ -432,7 +432,7 @@ def load_controlnet(ckpt_path, model=None): logging.warning("missing controlnet keys: {}".format(missing)) if len(unexpected) > 0: - logging.info("unexpected controlnet keys: {}".format(unexpected)) + logging.debug("unexpected controlnet keys: {}".format(unexpected)) global_average_pooling = False filename = os.path.splitext(ckpt_path)[0] @@ -545,6 +545,6 @@ def load_t2i_adapter(t2i_data): logging.warning("t2i missing {}".format(missing)) if len(unexpected) > 0: - logging.info("t2i unexpected {}".format(unexpected)) + logging.debug("t2i unexpected {}".format(unexpected)) return T2IAdapter(model_ad, model_ad.input_channels, compression_ratio, upscale_algorithm) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index 18398cb32..08018c54d 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -178,7 +178,7 @@ def convert_vae_state_dict(vae_state_dict): for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if f"mid.attn_1.{weight_name}.weight" in k: - logging.info(f"Reshaping {k} for SD format") + logging.debug(f"Reshaping {k} for SD format") new_state_dict[k] = reshape_weight_for_sd(v) return new_state_dict diff --git a/comfy/model_base.py b/comfy/model_base.py index a2514ca5e..5da71e632 100644 --- a/comfy/model_base.py +++ b/comfy/model_base.py @@ -67,8 +67,8 @@ class BaseModel(torch.nn.Module): if self.adm_channels is None: self.adm_channels = 0 self.inpaint_model = False - logging.warning("model_type {}".format(model_type.name)) - logging.info("adm {}".format(self.adm_channels)) + logging.info("model_type {}".format(model_type.name)) + logging.debug("adm {}".format(self.adm_channels)) def apply_model(self, x, t, c_concat=None, c_crossattn=None, control=None, transformer_options={}, **kwargs): sigma = t diff --git a/comfy/model_management.py b/comfy/model_management.py index dd262e260..2f0a0a627 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -30,7 +30,7 @@ lowvram_available = True xpu_available = False if args.deterministic: - logging.warning("Using deterministic algorithms for pytorch") + logging.info("Using deterministic algorithms for pytorch") torch.use_deterministic_algorithms(True, warn_only=True) directml_enabled = False @@ -42,7 +42,7 @@ if args.directml is not None: directml_device = torch_directml.device() else: directml_device = torch_directml.device(device_index) - logging.warning("Using directml with device: {}".format(torch_directml.device_name(device_index))) + logging.info("Using directml with device: {}".format(torch_directml.device_name(device_index))) # torch_directml.disable_tiled_resources(True) lowvram_available = False #TODO: need to find a way to get free memory in 
directml before this can be enabled by default. @@ -118,7 +118,7 @@ def get_total_memory(dev=None, torch_total_too=False): total_vram = get_total_memory(get_torch_device()) / (1024 * 1024) total_ram = psutil.virtual_memory().total / (1024 * 1024) -logging.warning("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)) +logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)) if not args.normalvram and not args.cpu: if lowvram_available and total_vram <= 4096: logging.warning("Trying to enable lowvram mode because your GPU seems to have 4GB or less. If you don't want this use: --normalvram") @@ -144,7 +144,7 @@ else: pass try: XFORMERS_VERSION = xformers.version.__version__ - logging.warning("xformers version: {}".format(XFORMERS_VERSION)) + logging.info("xformers version: {}".format(XFORMERS_VERSION)) if XFORMERS_VERSION.startswith("0.0.18"): logging.warning("\nWARNING: This version of xformers has a major bug where you will get black images when generating high resolution images.") logging.warning("Please downgrade or upgrade xformers to a different version.\n") @@ -212,11 +212,11 @@ elif args.highvram or args.gpu_only: FORCE_FP32 = False FORCE_FP16 = False if args.force_fp32: - logging.warning("Forcing FP32, if this improves things please report it.") + logging.info("Forcing FP32, if this improves things please report it.") FORCE_FP32 = True if args.force_fp16: - logging.warning("Forcing FP16.") + logging.info("Forcing FP16.") FORCE_FP16 = True if lowvram_available: @@ -230,12 +230,12 @@ if cpu_state != CPUState.GPU: if cpu_state == CPUState.MPS: vram_state = VRAMState.SHARED -logging.warning(f"Set vram state to: {vram_state.name}") +logging.info(f"Set vram state to: {vram_state.name}") DISABLE_SMART_MEMORY = args.disable_smart_memory if DISABLE_SMART_MEMORY: - logging.warning("Disabling smart memory management") + logging.info("Disabling smart memory management") def get_torch_device_name(device): if hasattr(device, 'type'): @@ -253,11 +253,11 @@ def get_torch_device_name(device): return "CUDA {}: {}".format(device, torch.cuda.get_device_name(device)) try: - logging.warning("Device: {}".format(get_torch_device_name(get_torch_device()))) + logging.info("Device: {}".format(get_torch_device_name(get_torch_device()))) except: logging.warning("Could not pick default device.") -logging.warning("VAE dtype: {}".format(VAE_DTYPE)) +logging.info("VAE dtype: {}".format(VAE_DTYPE)) current_loaded_models = [] @@ -300,7 +300,7 @@ class LoadedModel: raise e if lowvram_model_memory > 0: - logging.warning("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024))) + logging.info("loading in lowvram mode {}".format(lowvram_model_memory/(1024 * 1024))) mem_counter = 0 for m in self.real_model.modules(): if hasattr(m, "comfy_cast_weights"): @@ -347,7 +347,7 @@ def unload_model_clones(model): to_unload = [i] + to_unload for i in to_unload: - logging.warning("unload clone {}".format(i)) + logging.debug("unload clone {}".format(i)) current_loaded_models.pop(i).model_unload() def free_memory(memory_required, device, keep_loaded=[]): @@ -389,7 +389,7 @@ def load_models_gpu(models, memory_required=0): models_already_loaded.append(loaded_model) else: if hasattr(x, "model"): - logging.warning(f"Requested to load {x.model.__class__.__name__}") + logging.info(f"Requested to load {x.model.__class__.__name__}") models_to_load.append(loaded_model) if len(models_to_load) == 0: @@ -399,7 +399,7 @@ def load_models_gpu(models, memory_required=0): 
free_memory(extra_mem, d, models_already_loaded) return - logging.warning(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}") + logging.info(f"Loading {len(models_to_load)} new model{'s' if len(models_to_load) > 1 else ''}") total_memory_required = {} for loaded_model in models_to_load: diff --git a/comfy/sd.py b/comfy/sd.py index 3e4b9e47f..85821120e 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -229,7 +229,7 @@ class VAE: logging.warning("Missing VAE keys {}".format(m)) if len(u) > 0: - logging.info("Leftover VAE keys {}".format(u)) + logging.debug("Leftover VAE keys {}".format(u)) if device is None: device = model_management.vae_device() @@ -397,7 +397,7 @@ def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DI logging.warning("clip missing: {}".format(m)) if len(u) > 0: - logging.info("clip unexpected: {}".format(u)) + logging.debug("clip unexpected: {}".format(u)) return clip def load_gligen(ckpt_path): @@ -538,18 +538,18 @@ def load_checkpoint_guess_config(ckpt_path, output_vae=True, output_clip=True, o logging.warning("clip missing: {}".format(m)) if len(u) > 0: - logging.info("clip unexpected {}:".format(u)) + logging.debug("clip unexpected {}:".format(u)) else: logging.warning("no CLIP/text encoder weights in checkpoint, the text encoder model will not be loaded.") left_over = sd.keys() if len(left_over) > 0: - logging.info("left over keys: {}".format(left_over)) + logging.debug("left over keys: {}".format(left_over)) if output_model: model_patcher = comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=model_management.unet_offload_device(), current_device=inital_load_device) if inital_load_device != torch.device("cpu"): - logging.warning("loaded straight to GPU") + logging.info("loaded straight to GPU") model_management.load_model_gpu(model_patcher) return (model_patcher, clip, vae, clipvision) @@ -589,7 +589,7 @@ def load_unet_state_dict(sd): #load unet in diffusers format model.load_model_weights(new_sd, "") left_over = sd.keys() if len(left_over) > 0: - logging.warning("left over keys in unet: {}".format(left_over)) + logging.info("left over keys in unet: {}".format(left_over)) return comfy.model_patcher.ModelPatcher(model, load_device=load_device, offload_device=offload_device) def load_unet(unet_path): diff --git a/comfy/utils.py b/comfy/utils.py index 8caecd866..ab47b8f28 100644 --- a/comfy/utils.py +++ b/comfy/utils.py @@ -22,7 +22,7 @@ def load_torch_file(ckpt, safe_load=False, device=None): else: pl_sd = torch.load(ckpt, map_location=device, pickle_module=comfy.checkpoint_pickle) if "global_step" in pl_sd: - logging.info(f"Global Step: {pl_sd['global_step']}") + logging.debug(f"Global Step: {pl_sd['global_step']}") if "state_dict" in pl_sd: sd = pl_sd["state_dict"] else: diff --git a/nodes.py b/nodes.py index ca14677d5..e2f35dc27 100644 --- a/nodes.py +++ b/nodes.py @@ -1925,14 +1925,14 @@ def load_custom_nodes(): node_import_times.append((time.perf_counter() - time_before, module_path, success)) if len(node_import_times) > 0: - logging.warning("\nImport times for custom nodes:") + logging.info("\nImport times for custom nodes:") for n in sorted(node_import_times): if n[2]: import_message = "" else: import_message = " (IMPORT FAILED)" - logging.warning("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) - logging.warning("") + logging.info("{:6.1f} seconds{}: {}".format(n[0], import_message, n[1])) + logging.info("") def init_custom_nodes(): extras_dir = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy_extras") diff --git a/server.py b/server.py index 7b4f910e3..d748a47d1 100644 --- a/server.py +++ b/server.py @@ -17,6 +17,7 @@ from io import BytesIO import aiohttp from aiohttp import web +import logging import mimetypes from comfy.cli_args import args @@ -33,7 +34,7 @@ async def send_socket_catch_exception(function, message): try: await function(message) except (aiohttp.ClientError, aiohttp.ClientPayloadError, ConnectionResetError) as err: - print("send error:", err) + logging.warning("send error: {}".format(err)) @web.middleware async def cache_control(request: web.Request, handler): @@ -111,7 +112,7 @@ class PromptServer(): async for msg in ws: if msg.type == aiohttp.WSMsgType.ERROR: - print('ws connection closed with exception %s' % ws.exception()) + logging.warning('ws connection closed with exception %s' % ws.exception()) finally: self.sockets.pop(sid, None) return ws @@ -446,7 +447,7 @@ class PromptServer(): @routes.post("/prompt") async def post_prompt(request): - print("got prompt") + logging.info("got prompt") resp_code = 200 out_string = "" json_data = await request.json() @@ -478,7 +479,7 @@ class PromptServer(): response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]} return web.json_response(response) else: - print("invalid prompt:", valid[1]) + logging.warning("invalid prompt: {}".format(valid[1])) return web.json_response({"error": valid[1], "node_errors": valid[3]}, status=400) else: return web.json_response({"error": "no prompt", "node_errors": []}, status=400) @@ -626,8 +627,8 @@ class PromptServer(): await site.start() if verbose: - print("Starting server\n") - print("To see the GUI go to: http://{}:{}".format(address, port)) + logging.info("Starting server\n") + logging.info("To see the GUI go to: http://{}:{}".format(address, port)) if call_on_start is not None: call_on_start(address, port) @@ -639,7 +640,7 @@ class PromptServer(): try: json_data = handler(json_data) except Exception as e: - print(f"[ERROR] An error occurred during the on_prompt_handler processing") + logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing") traceback.print_exc() return json_data From 2a813c3b09292c9aeab622ddf65d77e5d8171d0d Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 11 Mar 2024 16:24:47 -0400 Subject: [PATCH 4/4] Switch some more prints to logging. 
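
The conversion is mechanical but not one-to-one: print() joins any number of positional arguments with spaces, while the logging helpers expect a single message string (extra positional arguments are treated as %-formatting arguments). Values therefore get folded into the message first. A minimal sketch of the rule applied throughout this series:

    import logging

    steps = 64  # illustrative value standing in for the real variable

    # Before: print joins its arguments with spaces.
    print("out of memory error, increasing steps and trying again", steps)

    # After: build one message with str.format() or an f-string;
    # logging.warning("msg", steps) would try to %-format "msg" with steps instead.
    logging.warning("out of memory error, increasing steps and trying again {}".format(steps))

This is also why the logging.warning(f"Cannot import {module_path} module for custom nodes:", e) call introduced in the first patch is reworked into a single f-string later in this patch.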
--- comfy/ldm/modules/attention.py | 15 ++++++++------- comfy/ldm/modules/diffusionmodules/model.py | 13 +++++++------ .../modules/diffusionmodules/openaimodel.py | 5 +++-- comfy/ldm/modules/sub_quadratic_attention.py | 3 ++- comfy/samplers.py | 3 ++- comfy_extras/nodes_freelunch.py | 6 +++--- comfy_extras/nodes_hypernetwork.py | 3 ++- main.py | 18 +++++++++--------- nodes.py | 2 +- server.py | 6 +++--- 10 files changed, 40 insertions(+), 34 deletions(-) diff --git a/comfy/ldm/modules/attention.py b/comfy/ldm/modules/attention.py index 48399bc07..f116efee3 100644 --- a/comfy/ldm/modules/attention.py +++ b/comfy/ldm/modules/attention.py @@ -4,6 +4,7 @@ import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat from typing import Optional, Any +import logging from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding from .sub_quadratic_attention import efficient_dot_product_attention @@ -20,7 +21,7 @@ ops = comfy.ops.disable_weight_init # CrossAttn precision handling if args.dont_upcast_attention: - print("disabling upcasting of attention") + logging.info("disabling upcasting of attention") _ATTN_PRECISION = "fp16" else: _ATTN_PRECISION = "fp32" @@ -274,12 +275,12 @@ def attention_split(q, k, v, heads, mask=None): model_management.soft_empty_cache(True) if cleared_cache == False: cleared_cache = True - print("out of memory error, emptying cache and trying again") + logging.warning("out of memory error, emptying cache and trying again") continue steps *= 2 if steps > 64: raise e - print("out of memory error, increasing steps and trying again", steps) + logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) else: raise e @@ -351,17 +352,17 @@ def attention_pytorch(q, k, v, heads, mask=None): optimized_attention = attention_basic if model_management.xformers_enabled(): - print("Using xformers cross attention") + logging.info("Using xformers cross attention") optimized_attention = attention_xformers elif model_management.pytorch_attention_enabled(): - print("Using pytorch cross attention") + logging.info("Using pytorch cross attention") optimized_attention = attention_pytorch else: if args.use_split_cross_attention: - print("Using split optimization for cross attention") + logging.info("Using split optimization for cross attention") optimized_attention = attention_split else: - print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") + logging.info("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention") optimized_attention = attention_sub_quad optimized_attention_masked = optimized_attention diff --git a/comfy/ldm/modules/diffusionmodules/model.py b/comfy/ldm/modules/diffusionmodules/model.py index cc81c1f23..fabc5c5e5 100644 --- a/comfy/ldm/modules/diffusionmodules/model.py +++ b/comfy/ldm/modules/diffusionmodules/model.py @@ -5,6 +5,7 @@ import torch.nn as nn import numpy as np from einops import rearrange from typing import Optional, Any +import logging from comfy import model_management import comfy.ops @@ -190,7 +191,7 @@ def slice_attention(q, k, v): steps *= 2 if steps > 128: raise e - print("out of memory error, increasing steps and trying again", steps) + logging.warning("out of memory error, increasing steps and trying again {}".format(steps)) return r1 @@ -235,7 +236,7 @@ def pytorch_attention(q, k, v): out = 
torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False) out = out.transpose(2, 3).reshape(B, C, H, W) except model_management.OOM_EXCEPTION as e: - print("scaled_dot_product_attention OOMed: switched to slice attention") + logging.warning("scaled_dot_product_attention OOMed: switched to slice attention") out = slice_attention(q.view(B, -1, C), k.view(B, -1, C).transpose(1, 2), v.view(B, -1, C).transpose(1, 2)).reshape(B, C, H, W) return out @@ -268,13 +269,13 @@ class AttnBlock(nn.Module): padding=0) if model_management.xformers_enabled_vae(): - print("Using xformers attention in VAE") + logging.info("Using xformers attention in VAE") self.optimized_attention = xformers_attention elif model_management.pytorch_attention_enabled(): - print("Using pytorch attention in VAE") + logging.info("Using pytorch attention in VAE") self.optimized_attention = pytorch_attention else: - print("Using split attention in VAE") + logging.info("Using split attention in VAE") self.optimized_attention = normal_attention def forward(self, x): @@ -562,7 +563,7 @@ class Decoder(nn.Module): block_in = ch*ch_mult[self.num_resolutions-1] curr_res = resolution // 2**(self.num_resolutions-1) self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( + logging.debug("Working with z of shape {} = {} dimensions.".format( self.z_shape, np.prod(self.z_shape))) # z to block_in diff --git a/comfy/ldm/modules/diffusionmodules/openaimodel.py b/comfy/ldm/modules/diffusionmodules/openaimodel.py index cf89ae017..d782eff31 100644 --- a/comfy/ldm/modules/diffusionmodules/openaimodel.py +++ b/comfy/ldm/modules/diffusionmodules/openaimodel.py @@ -4,6 +4,7 @@ import torch as th import torch.nn as nn import torch.nn.functional as F from einops import rearrange +import logging from .util import ( checkpoint, @@ -359,7 +360,7 @@ def apply_control(h, control, name): try: h += ctrl except: - print("warning control could not be applied", h.shape, ctrl.shape) + logging.warning("warning control could not be applied {} {}".format(h.shape, ctrl.shape)) return h class UNetModel(nn.Module): @@ -496,7 +497,7 @@ class UNetModel(nn.Module): if isinstance(self.num_classes, int): self.label_emb = nn.Embedding(num_classes, time_embed_dim, dtype=self.dtype, device=device) elif self.num_classes == "continuous": - print("setting up linear c_adm embedding layer") + logging.debug("setting up linear c_adm embedding layer") self.label_emb = nn.Linear(1, time_embed_dim) elif self.num_classes == "sequential": assert adm_in_channels is not None diff --git a/comfy/ldm/modules/sub_quadratic_attention.py b/comfy/ldm/modules/sub_quadratic_attention.py index cb0896b0d..1bc4138c3 100644 --- a/comfy/ldm/modules/sub_quadratic_attention.py +++ b/comfy/ldm/modules/sub_quadratic_attention.py @@ -14,6 +14,7 @@ import torch from torch import Tensor from torch.utils.checkpoint import checkpoint import math +import logging try: from typing import Optional, NamedTuple, List, Protocol @@ -170,7 +171,7 @@ def _get_attention_scores_no_kv_chunking( attn_probs = attn_scores.softmax(dim=-1) del attn_scores except model_management.OOM_EXCEPTION: - print("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead") + logging.warning("ran out of memory while running softmax in _get_attention_scores_no_kv_chunking, trying slower in place softmax instead") attn_scores -= attn_scores.max(dim=-1, keepdim=True).values 
torch.exp(attn_scores, out=attn_scores) summed = torch.sum(attn_scores, dim=-1, keepdim=True) diff --git a/comfy/samplers.py b/comfy/samplers.py index 6863be4eb..16b4514e1 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -4,6 +4,7 @@ import torch import collections from comfy import model_management import math +import logging def get_area_and_mult(conds, x_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) @@ -625,7 +626,7 @@ def calculate_sigmas_scheduler(model, scheduler_name, steps): elif scheduler_name == "sgm_uniform": sigmas = normal_scheduler(model, steps, sgm=True) else: - print("error invalid scheduler", scheduler_name) + logging.error("error invalid scheduler {}".format(scheduler_name)) return sigmas def sampler_object(name): diff --git a/comfy_extras/nodes_freelunch.py b/comfy_extras/nodes_freelunch.py index 7764aa0b0..6f1d87bf3 100644 --- a/comfy_extras/nodes_freelunch.py +++ b/comfy_extras/nodes_freelunch.py @@ -1,7 +1,7 @@ #code originally taken from: https://github.com/ChenyangSi/FreeU (under MIT License) import torch - +import logging def Fourier_filter(x, threshold, scale): # FFT @@ -49,7 +49,7 @@ class FreeU: try: hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) except: - print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.") + logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) on_cpu_devices[hsp.device] = True hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) else: @@ -95,7 +95,7 @@ class FreeU_V2: try: hsp = Fourier_filter(hsp, threshold=1, scale=scale[1]) except: - print("Device", hsp.device, "does not support the torch.fft functions used in the FreeU node, switching to CPU.") + logging.warning("Device {} does not support the torch.fft functions used in the FreeU node, switching to CPU.".format(hsp.device)) on_cpu_devices[hsp.device] = True hsp = Fourier_filter(hsp.cpu(), threshold=1, scale=scale[1]).to(hsp.device) else: diff --git a/comfy_extras/nodes_hypernetwork.py b/comfy_extras/nodes_hypernetwork.py index f692945a8..cafafa6ab 100644 --- a/comfy_extras/nodes_hypernetwork.py +++ b/comfy_extras/nodes_hypernetwork.py @@ -1,6 +1,7 @@ import comfy.utils import folder_paths import torch +import logging def load_hypernetwork_patch(path, strength): sd = comfy.utils.load_torch_file(path, safe_load=True) @@ -23,7 +24,7 @@ def load_hypernetwork_patch(path, strength): } if activation_func not in valid_activation: - print("Unsupported Hypernetwork format, if you report it I might implement it.", path, " ", activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout) + logging.error("Unsupported Hypernetwork format, if you report it I might implement it. 
{} {} {} {} {} {}".format(path, activation_func, is_layer_norm, use_dropout, activate_output, last_layer_dropout)) return None out = {} diff --git a/main.py b/main.py index 5d07ce2d1..3dee72e3a 100644 --- a/main.py +++ b/main.py @@ -54,15 +54,15 @@ import threading import gc from comfy.cli_args import args +import logging if os.name == "nt": - import logging logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) if __name__ == "__main__": if args.cuda_device is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device) - print("Set cuda device to:", args.cuda_device) + logging.info("Set cuda device to: {}".format(args.cuda_device)) if args.deterministic: if 'CUBLAS_WORKSPACE_CONFIG' not in os.environ: @@ -88,7 +88,7 @@ def cuda_malloc_warning(): if b in device_name: cuda_malloc_warning = True if cuda_malloc_warning: - print("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") + logging.warning("\nWARNING: this card most likely does not support cuda-malloc, if you get \"CUDA error\" please run ComfyUI with: --disable-cuda-malloc\n") def prompt_worker(q, server): e = execution.PromptExecutor(server) @@ -121,7 +121,7 @@ def prompt_worker(q, server): current_time = time.perf_counter() execution_time = current_time - execution_start_time - print("Prompt executed in {:.2f} seconds".format(execution_time)) + logging.info("Prompt executed in {:.2f} seconds".format(execution_time)) flags = q.get_flags() free_memory = flags.get("free_memory", False) @@ -182,14 +182,14 @@ def load_extra_path_config(yaml_path): full_path = y if base_path is not None: full_path = os.path.join(base_path, full_path) - print("Adding extra search path", x, full_path) + logging.info("Adding extra search path {} {}".format(x, full_path)) folder_paths.add_model_folder_path(x, full_path) if __name__ == "__main__": if args.temp_directory: temp_dir = os.path.join(os.path.abspath(args.temp_directory), "temp") - print(f"Setting temp directory to: {temp_dir}") + logging.info(f"Setting temp directory to: {temp_dir}") folder_paths.set_temp_directory(temp_dir) cleanup_temp() @@ -224,7 +224,7 @@ if __name__ == "__main__": if args.output_directory: output_dir = os.path.abspath(args.output_directory) - print(f"Setting output directory to: {output_dir}") + logging.info(f"Setting output directory to: {output_dir}") folder_paths.set_output_directory(output_dir) #These are the default folders that checkpoints, clip and vae models will be saved to when using CheckpointSave, etc.. 
nodes @@ -234,7 +234,7 @@ if __name__ == "__main__": if args.input_directory: input_dir = os.path.abspath(args.input_directory) - print(f"Setting input directory to: {input_dir}") + logging.info(f"Setting input directory to: {input_dir}") folder_paths.set_input_directory(input_dir) if args.quick_test_for_ci: @@ -252,6 +252,6 @@ if __name__ == "__main__": try: loop.run_until_complete(run(server, address=args.listen, port=args.port, verbose=not args.dont_print_server, call_on_start=call_on_start)) except KeyboardInterrupt: - print("\nStopped server") + logging.info("\nStopped server") cleanup_temp() diff --git a/nodes.py b/nodes.py index e2f35dc27..453f6e606 100644 --- a/nodes.py +++ b/nodes.py @@ -1904,7 +1904,7 @@ def load_custom_node(module_path, ignore=set()): return False except Exception as e: logging.warning(traceback.format_exc()) - logging.warning(f"Cannot import {module_path} module for custom nodes:", e) + logging.warning(f"Cannot import {module_path} module for custom nodes: {e}") return False def load_custom_nodes(): diff --git a/server.py b/server.py index d748a47d1..5642bd5e2 100644 --- a/server.py +++ b/server.py @@ -413,8 +413,8 @@ class PromptServer(): try: out[x] = node_info(x) except Exception as e: - print(f"[ERROR] An error occurred while retrieving information for the '{x}' node.", file=sys.stderr) - traceback.print_exc() + logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.") + logging.error(traceback.format_exc()) return web.json_response(out) @routes.get("/object_info/{node_class}") @@ -641,6 +641,6 @@ class PromptServer(): json_data = handler(json_data) except Exception as e: logging.warning(f"[ERROR] An error occurred during the on_prompt_handler processing") - traceback.print_exc() + logging.warning(traceback.format_exc()) return json_data
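
Taken together, the series leaves a simple convention for ComfyUI modules and custom nodes: import logging at module level, call the root-logger helpers directly, and choose the level by audience instead of printing. A short illustrative sketch of that convention (the function and messages are hypothetical, not taken from the patches):

    import logging

    def load_something(path: str) -> bool:
        logging.info("Loading {}".format(path))          # normal progress, visible by default
        try:
            data = open(path, "rb").read()
        except OSError as e:
            logging.warning("Could not load {}: {}".format(path, e))  # user-facing problem
            return False
        logging.debug("loaded {} bytes".format(len(data)))  # detail, only shown with --verbose
        return True

    if __name__ == "__main__":
        load_something("example.bin")

DEBUG output from code like this only appears when ComfyUI is started with --verbose; INFO and above are shown under the new default level.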