From 0e2a834c86d6a7f4663ca6f3e395e09f8011a94d Mon Sep 17 00:00:00 2001
From: Simon Lui <502929+simonlui@users.noreply.github.com>
Date: Thu, 24 Aug 2023 23:37:37 -0700
Subject: [PATCH] Make torch.compile do something properly and work once
 einops is upgraded.

---
 comfy/cli_args.py         | 2 +-
 comfy/model_management.py | 3 ---
 comfy/sample.py           | 5 ++++-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/comfy/cli_args.py b/comfy/cli_args.py
index 1e49f9a50..7cdaa0507 100644
--- a/comfy/cli_args.py
+++ b/comfy/cli_args.py
@@ -61,7 +61,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
 
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.")
 parser.add_argument("--disable-torch-compile", action="store_true", help="Disables torch.compile for loading models.")
-parser.add_argument("--torch-compile-fullgraph", action="store_true", default=True, help="torch.compile argument for if the model should be compiled into a single graph.")
+parser.add_argument("--torch-compile-fullgraph", action="store_true", default=False, help="torch.compile argument for if the model should be compiled into a single graph.")
 parser.add_argument("--torch-compile-backend", type=str, default="inductor", help="torch.compile argument for what backend to use. See Pytorch documentation for available backends to choose from.")
 parser.add_argument("--torch-compile-mode", type=str, default="default", help="torch.compile argument for what compile mode to use. Options include 'default', 'reduce-overhead', or 'max-autotune'.")
 
diff --git a/comfy/model_management.py b/comfy/model_management.py
index 1566b931e..fc0cb9011 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -273,9 +273,6 @@ class LoadedModel:
         if xpu_available and not args.disable_ipex_optimize:
             self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
 
-        if not args.disable_torch_compile:
-            self.real_model = torch.compile(self.real_model, fullgraph=args.torch_compile_fullgraph, backend=args.torch_compile_backend, mode=args.torch_compile_mode)
-
         return self.real_model
 
     def model_unload(self):
diff --git a/comfy/sample.py b/comfy/sample.py
index d7292024e..4824895cb 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -1,3 +1,4 @@
+from comfy.cli_args import args
 import torch
 import comfy.model_management
 import comfy.samplers
@@ -81,13 +82,15 @@ def sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative
     comfy.model_management.load_models_gpu([model] + models, comfy.model_management.batch_area_memory(noise.shape[0] * noise.shape[2] * noise.shape[3]))
     real_model = model.model
 
+    if not args.disable_torch_compile:
+        real_model.diffusion_model = torch.compile(real_model.diffusion_model, fullgraph=args.torch_compile_fullgraph, backend=args.torch_compile_backend, mode=args.torch_compile_mode)
+
     noise = noise.to(device)
     latent_image = latent_image.to(device)
 
     positive_copy = broadcast_cond(positive, noise.shape[0], device)
     negative_copy = broadcast_cond(negative, noise.shape[0], device)
-
     sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=denoise, model_options=model.model_options)
     samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
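
For reference, a minimal runnable sketch of the pattern the comfy/sample.py hunk adopts: compile only the inner diffusion_model submodule rather than the whole wrapper, so the wrapper's Python-side logic stays in eager mode instead of forcing graph breaks, and default fullgraph to False so unsupported ops fall back to eager rather than erroring. The ToyModel class and the Linear stand-in below are illustrative assumptions, not ComfyUI's actual model classes; requires PyTorch 2.0+.

import torch

class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # Stand-in for the UNet that ComfyUI keeps on model.diffusion_model.
        self.diffusion_model = torch.nn.Linear(16, 16)

    def forward(self, x):
        # Wrapper-level logic stays eager; only the submodule is compiled.
        return self.diffusion_model(x)

model = ToyModel()
# Mirror the patch: compile only the submodule, matching the CLI defaults
# (fullgraph=False, backend="inductor", mode="default").
model.diffusion_model = torch.compile(
    model.diffusion_model, fullgraph=False, backend="inductor", mode="default"
)
print(model(torch.randn(4, 16)).shape)  # first call triggers compilation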