From e7cc72cee0cdb05cb2d3415ccba0de9468ddc904 Mon Sep 17 00:00:00 2001
From: drhead <1313496+drhead@users.noreply.github.com>
Date: Fri, 23 May 2025 18:12:10 -0400
Subject: [PATCH 1/4] Add support for live previews on separate stream

---
 latent_preview.py | 50 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 14 deletions(-)

diff --git a/latent_preview.py b/latent_preview.py
index 95d3cb733..453bb210b 100644
--- a/latent_preview.py
+++ b/latent_preview.py
@@ -6,18 +6,29 @@ import comfy.model_management
 import folder_paths
 import comfy.utils
 import logging
+from contextlib import nullcontext
+import threading
 
 MAX_PREVIEW_RESOLUTION = args.preview_size
 
-def preview_to_image(latent_image):
-        latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1)  # change scale from -1..1 to 0..1
-                            .mul(0xFF)  # to 0..255
-                            )
-        if comfy.model_management.directml_enabled:
-            latents_ubyte = latents_ubyte.to(dtype=torch.uint8)
-        latents_ubyte = latents_ubyte.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
+if args.preview_stream:
+    preview_stream = torch.cuda.Stream()
+    preview_context = torch.cuda.stream(preview_stream)
+else:
+    preview_context = nullcontext()
 
-        return Image.fromarray(latents_ubyte.numpy())
+def preview_to_image(preview_image: torch.Tensor):
+    # none of this needs to happen on the GPU, and non-blocking transfers to the CPU are never safe anyway
+    # a blocking copy is acceptable here because the main stream keeps running regardless
+    preview_image = preview_image.cpu()
+
+    preview_image.add_(1.0)
+    preview_image.div_(2.0)
+    preview_image.clamp_(0, 1) # change scale from -1..1 to 0..1 and clamp
+    preview_image.mul_(255.) # change to uint8 range
+    preview_image.round_() # default behavior when casting is truncate, which is wrong for image processing
+
+    return Image.fromarray(preview_image.to(dtype=torch.uint8).numpy())
 
 class LatentPreviewer:
     def decode_latent_to_preview(self, x0):
@@ -97,12 +108,23 @@ def prepare_callback(model, steps, x0_output_dict=None):
     pbar = comfy.utils.ProgressBar(steps)
 
     def callback(step, x0, x, total_steps):
-        if x0_output_dict is not None:
-            x0_output_dict["x0"] = x0
+        @torch.inference_mode
+        def worker():
+            if x0_output_dict is not None:
+                x0_output_dict["x0"] = x0
 
-        preview_bytes = None
-        if previewer:
-            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
-        pbar.update_absolute(step + 1, total_steps, preview_bytes)
+            preview_bytes = None
+            if previewer:
+                with preview_context:
+                    preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+            pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+        if args.preview_stream:
+            # must wait for the default stream to catch up, otherwise we would decode a garbage tensor
+            # wait_stream only makes the preview stream wait; the default stream is never blocked by this
+            preview_stream.wait_stream(torch.cuda.default_stream())
+            threading.Thread(target=worker, daemon=True).start()
+        else: worker() # no point in threading this off if there's no separate stream
+
 
     return callback
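
The synchronization pattern PATCH 1 relies on: preview_stream.wait_stream(torch.cuda.default_stream()) records an event on the default stream and makes only the preview stream wait for it, so the preview decode starts after the sampler kernels that produced x0 have finished, while the sampler itself is never stalled; the blocking .cpu() copy inside the worker then stalls only that background thread. A minimal sketch of the same ordering, assuming a CUDA device; the names side, decode_preview_async, out, and the toy decode are illustrative, not from the patch:

    import threading
    import torch

    side = torch.cuda.Stream()

    def decode_preview_async(x0: torch.Tensor, out: dict):
        # Order the side stream after everything already queued on the default
        # stream, so x0 is fully written before the preview reads it. Only the
        # side stream waits; the default stream is never delayed by this.
        side.wait_stream(torch.cuda.default_stream())

        def worker():
            # A new thread starts out on the default stream, so the side stream
            # must be made current explicitly (the patch's preview_context).
            with torch.cuda.stream(side):
                preview = x0.mul(0.5).add(0.5).clamp_(0, 1)  # stand-in for the real decode
                out["preview"] = preview.cpu()  # blocking copy; stalls only this thread

        threading.Thread(target=worker, daemon=True).start()

One caveat of the general pattern: wait_stream only orders work that is already queued, so if the default stream later reuses x0's memory while the side stream is still reading it, the preview can still race; torch.Tensor.record_stream is the usual remedy for that.
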
parser.add_argument("--preview-method", type=LatentPreviewMethod, default=Latent parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.") +parser.add_argument("--preview-stream", action="store_true", help="Use a CUDA Stream to reduce performance cost of previews.") + cache_group = parser.add_mutually_exclusive_group() cache_group.add_argument("--cache-classic", action="store_true", help="Use the old style (aggressive) caching.") cache_group.add_argument("--cache-lru", type=int, default=0, help="Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM.") From 1fd5f330a834eb6c945e43a0c065a35ebba5e84e Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sun, 25 May 2025 10:56:00 -0400 Subject: [PATCH 3/4] div(2).mul(255) -> mul(127.5) --- latent_preview.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/latent_preview.py b/latent_preview.py index 453bb210b..e59da1165 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -23,9 +23,8 @@ def preview_to_image(preview_image: torch.Tensor): preview_image = preview_image.cpu() preview_image.add_(1.0) - preview_image.div_(2.0) + preview_image.mul_(127.5) preview_image.clamp_(0, 1) # change scale from -1..1 to 0..1 and clamp - preview_image.mul_(255.) # change to uint8 range preview_image.round_() # default behavior when casting is truncate which is wrong for image processing return Image.fromarray(preview_image.to(dtype=torch.uint8).numpy()) From b3fb40adb864eaea57a00df5feabbcd915bc0872 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sun, 25 May 2025 11:19:49 -0400 Subject: [PATCH 4/4] clamp needed to change to match --- latent_preview.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/latent_preview.py b/latent_preview.py index e59da1165..0bec54d33 100644 --- a/latent_preview.py +++ b/latent_preview.py @@ -22,9 +22,9 @@ def preview_to_image(preview_image: torch.Tensor): # but we don't care about it blocking because the main stream is fine preview_image = preview_image.cpu() + preview_image.clamp_(-1.0, 1.0) preview_image.add_(1.0) preview_image.mul_(127.5) - preview_image.clamp_(0, 1) # change scale from -1..1 to 0..1 and clamp preview_image.round_() # default behavior when casting is truncate which is wrong for image processing return Image.fromarray(preview_image.to(dtype=torch.uint8).numpy())