import torch
from PIL import Image
from comfy.cli_args import args, LatentPreviewMethod
from comfy.taesd.taesd import TAESD
import comfy.model_management
import folder_paths
import comfy.utils
import logging
from contextlib import nullcontext
import threading

MAX_PREVIEW_RESOLUTION = args.preview_size

# Optionally decode previews on a dedicated CUDA side stream so they do not
# serialize with the sampling work on the default stream.
if args.preview_stream:
    preview_stream = torch.cuda.Stream()
    preview_context = torch.cuda.stream(preview_stream)
else:
    preview_context = nullcontext()


def preview_to_image(preview_image: torch.Tensor):
    # None of this has to happen on the GPU, and non-blocking transfers to CPU
    # are never safe here. The blocking copy is fine: it only stalls the
    # preview path, the main stream is unaffected.
    preview_image = preview_image.cpu()
    preview_image.add_(1.0)
    preview_image.div_(2.0)
    preview_image.clamp_(0, 1)  # rescale from -1..1 to 0..1 and clamp
    preview_image.mul_(255.)  # scale to the uint8 range
    preview_image.round_()  # casting truncates by default, which is wrong for image data
    return Image.fromarray(preview_image.to(dtype=torch.uint8).numpy())


class LatentPreviewer:
    def decode_latent_to_preview(self, x0):
        pass

    def decode_latent_to_preview_image(self, preview_format, x0):
        preview_image = self.decode_latent_to_preview(x0)
        return ("JPEG", preview_image, MAX_PREVIEW_RESOLUTION)


class TAESDPreviewerImpl(LatentPreviewer):
    def __init__(self, taesd):
        self.taesd = taesd

    def decode_latent_to_preview(self, x0):
        # Decode only the first latent in the batch and move channels last.
        x_sample = self.taesd.decode(x0[:1])[0].movedim(0, 2)
        return preview_to_image(x_sample)


class Latent2RGBPreviewer(LatentPreviewer):
    def __init__(self, latent_rgb_factors, latent_rgb_factors_bias=None):
        self.latent_rgb_factors = torch.tensor(latent_rgb_factors, device="cpu").transpose(0, 1)
        self.latent_rgb_factors_bias = None
        if latent_rgb_factors_bias is not None:
            self.latent_rgb_factors_bias = torch.tensor(latent_rgb_factors_bias, device="cpu")

    def decode_latent_to_preview(self, x0):
        self.latent_rgb_factors = self.latent_rgb_factors.to(dtype=x0.dtype, device=x0.device)
        if self.latent_rgb_factors_bias is not None:
            self.latent_rgb_factors_bias = self.latent_rgb_factors_bias.to(dtype=x0.dtype, device=x0.device)

        if x0.ndim == 5:
            x0 = x0[0, :, 0]  # video latents (B, C, T, H, W): first frame of the first batch item
        else:
            x0 = x0[0]

        latent_image = torch.nn.functional.linear(x0.movedim(0, -1), self.latent_rgb_factors,
                                                  bias=self.latent_rgb_factors_bias)
        # latent_image = x0[0].permute(1, 2, 0) @ self.latent_rgb_factors

        return preview_to_image(latent_image)


def get_previewer(device, latent_format):
    previewer = None
    method = args.preview_method
    if method != LatentPreviewMethod.NoPreviews:
        # TODO previewer methods
        taesd_decoder_path = None
        if latent_format.taesd_decoder_name is not None:
            taesd_decoder_path = next(
                (fn for fn in folder_paths.get_filename_list("vae_approx")
                    if fn.startswith(latent_format.taesd_decoder_name)),
                ""
            )
            taesd_decoder_path = folder_paths.get_full_path("vae_approx", taesd_decoder_path)

        if method == LatentPreviewMethod.Auto:
            method = LatentPreviewMethod.Latent2RGB

        if method == LatentPreviewMethod.TAESD:
            if taesd_decoder_path:
                taesd = TAESD(None, taesd_decoder_path, latent_channels=latent_format.latent_channels).to(device)
                previewer = TAESDPreviewerImpl(taesd)
            else:
                logging.warning("TAESD previews enabled, but could not find models/vae_approx/{}".format(latent_format.taesd_decoder_name))

        # Fall back to the cheap linear projection when no TAESD previewer was built.
        if previewer is None:
            if latent_format.latent_rgb_factors is not None:
                previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias)
    return previewer
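
# Illustration (not part of the original module): the Latent2RGB path is just
# a per-pixel affine map from C latent channels to RGB. With made-up factors
# for a hypothetical 4-channel latent it behaves like:
#
#   factors = [[0.3, 0.2, 0.1],   # contribution of latent channel 0 to (R, G, B)
#              [0.1, 0.3, 0.2],
#              [0.2, 0.1, 0.3],
#              [0.0, 0.1, 0.0]]
#   previewer = Latent2RGBPreviewer(factors)
#   img = previewer.decode_latent_to_preview(torch.randn(1, 4, 32, 32))
#   img.save("preview.jpg")  # 32x32 RGB PIL.Image
#
# Real factor tables come from the model's latent_format, not from here.
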
"JPEG" if preview_format not in ["JPEG", "PNG"]: preview_format = "JPEG" previewer = get_previewer(model.load_device, model.model.latent_format) pbar = comfy.utils.ProgressBar(steps) def callback(step, x0, x, total_steps): @torch.inference_mode def worker(): if x0_output_dict is not None: x0_output_dict["x0"] = x0 preview_bytes = None if previewer: with preview_context: preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0) pbar.update_absolute(step + 1, total_steps, preview_bytes) if args.preview_stream: # must wait for default stream to catch up else we will decode a garbage tensor # the default stream will not, under any circumstances, stop because of this preview_stream.wait_stream(torch.cuda.default_stream()) threading.Thread(target=worker, daemon=True).start() else: worker() # no point in threading this off if there's no separate stream return callback