Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-11 23:00:51 +08:00)
Merge b3fb40adb8 into a9c35256bc
This commit is contained in: commit 0219e5be6e
@@ -101,6 +101,8 @@ parser.add_argument("--preview-method", type=LatentPreviewMethod, default=Latent
 parser.add_argument("--preview-size", type=int, default=512, help="Sets the maximum preview size for sampler nodes.")
 
+parser.add_argument("--preview-stream", action="store_true", help="Use a CUDA Stream to reduce performance cost of previews.")
+
 cache_group = parser.add_mutually_exclusive_group()
 cache_group.add_argument("--cache-classic", action="store_true", help="Use the old style (aggressive) caching.")
 cache_group.add_argument("--cache-lru", type=int, default=0, help="Use LRU caching with a maximum of N node results cached. May use more RAM/VRAM.")
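For context, here is a minimal standalone sketch of how the new --preview-stream flag and the surrounding cache flags parse. Assumption: a bare argparse.ArgumentParser is used for illustration; in ComfyUI these add_argument calls extend the project's existing shared CLI parser.

import argparse

# Standalone sketch only; not the ComfyUI parser itself.
parser = argparse.ArgumentParser()
parser.add_argument("--preview-size", type=int, default=512,
                    help="Sets the maximum preview size for sampler nodes.")
parser.add_argument("--preview-stream", action="store_true",
                    help="Use a CUDA Stream to reduce performance cost of previews.")

cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument("--cache-classic", action="store_true",
                         help="Use the old style (aggressive) caching.")
cache_group.add_argument("--cache-lru", type=int, default=0,
                         help="Use LRU caching with a maximum of N node results cached.")

args = parser.parse_args(["--preview-stream"])
print(args.preview_size, args.preview_stream)  # 512 True; store_true flags default to False when omitted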
@@ -6,18 +6,28 @@ import comfy.model_management
 import folder_paths
 import comfy.utils
 import logging
+from contextlib import nullcontext
+import threading
 
 MAX_PREVIEW_RESOLUTION = args.preview_size
 
-def preview_to_image(latent_image):
-        latents_ubyte = (((latent_image + 1.0) / 2.0).clamp(0, 1)  # change scale from -1..1 to 0..1
-                            .mul(0xFF)  # to 0..255
-                            )
-        if comfy.model_management.directml_enabled:
-                latents_ubyte = latents_ubyte.to(dtype=torch.uint8)
-        latents_ubyte = latents_ubyte.to(device="cpu", dtype=torch.uint8, non_blocking=comfy.model_management.device_supports_non_blocking(latent_image.device))
-
-        return Image.fromarray(latents_ubyte.numpy())
+if args.preview_stream:
+    preview_stream = torch.cuda.Stream()
+    preview_context = torch.cuda.stream(preview_stream)
+else:
+    preview_context = nullcontext()
+
+def preview_to_image(preview_image: torch.Tensor):
+    # no reason why any of this has to happen on GPU, also non-blocking transfers to cpu aren't safe ever
+    # but we don't care about it blocking because the main stream is fine
+    preview_image = preview_image.cpu()
+
+    preview_image.clamp_(-1.0, 1.0)
+    preview_image.add_(1.0)
+    preview_image.mul_(127.5)
+    preview_image.round_()  # default behavior when casting is truncate which is wrong for image processing
+
+    return Image.fromarray(preview_image.to(dtype=torch.uint8).numpy())
 
 class LatentPreviewer:
     def decode_latent_to_preview(self, x0):
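The round_() call in the new preview_to_image matters because casting a float tensor to an integer dtype truncates toward zero. A small sketch of the difference, using only standard PyTorch and values independent of the patch:

import torch

# Truncation vs. rounding when converting float pixel values to uint8.
x = torch.tensor([-1.0, 0.0, 0.999])         # latent-style range -1..1
scaled = (x.clamp(-1.0, 1.0) + 1.0) * 127.5  # map to 0..255 -> [0.0, 127.5, 254.87]
print(scaled.to(torch.uint8))                # truncated: [0, 127, 254]
print(scaled.round().to(torch.uint8))        # rounded:   [0, 128, 255]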
@@ -97,12 +107,23 @@ def prepare_callback(model, steps, x0_output_dict=None):
 
     pbar = comfy.utils.ProgressBar(steps)
     def callback(step, x0, x, total_steps):
-        if x0_output_dict is not None:
-            x0_output_dict["x0"] = x0
-        preview_bytes = None
-        if previewer:
-            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
-        pbar.update_absolute(step + 1, total_steps, preview_bytes)
+        @torch.inference_mode
+        def worker():
+            if x0_output_dict is not None:
+                x0_output_dict["x0"] = x0
+
+            preview_bytes = None
+            if previewer:
+                with preview_context:
+                    preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+
+            pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+        if args.preview_stream:
+            # must wait for default stream to catch up else we will decode a garbage tensor
+            # the default stream will not, under any circumstances, stop because of this
+            preview_stream.wait_stream(torch.cuda.default_stream())
+            threading.Thread(target=worker, daemon=True).start()
+        else: worker()  # no point in threading this off if there's no separate stream
+
     return callback
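The pattern this hunk introduces, decoding the preview on a side CUDA stream in a daemon thread that first waits on the default stream, can be sketched in isolation. Assumptions: a CUDA device is available, and decode_preview_off_the_hot_path / on_done are illustrative names, not part of the patch.

import threading
import torch

def decode_preview_off_the_hot_path(latent: torch.Tensor, on_done):
    side_stream = torch.cuda.Stream()
    # The side stream must observe everything already queued on the default stream
    # for `latent`, otherwise it could read a half-written tensor. wait_stream() never
    # blocks the default stream itself, so sampling is not slowed down.
    side_stream.wait_stream(torch.cuda.default_stream())

    def worker():
        with torch.inference_mode(), torch.cuda.stream(side_stream):
            preview = ((latent.clamp(-1, 1) + 1.0) * 127.5).round().to(torch.uint8).cpu()
        on_done(preview)

    # daemon=True: a preview still in flight never keeps the process alive at shutdown.
    t = threading.Thread(target=worker, daemon=True)
    t.start()
    return t

if torch.cuda.is_available():
    x = torch.randn(3, 64, 64, device="cuda")
    t = decode_preview_off_the_hot_path(x, lambda img: print(img.shape, img.dtype))
    t.join()  # only so this demo prints before exiting; the real callback never joins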