mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-10 13:32:36 +08:00
Add in torch.compile support for model loading. Some nodes may be excluded.
This commit is contained in:
parent
0d7b0a4dc7
commit
cf4d7c485f
@ -60,6 +60,11 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
|
||||
|
||||
# Intel GPU path: allow opting out of ipex.optimize at model-load time.
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize when loading models with Intel GPUs.")

# torch.compile integration: an opt-out switch plus the knobs that are
# forwarded verbatim to torch.compile() when a model is loaded.
parser.add_argument("--disable-torch-compile", action="store_true", help="Disables torch.compile for loading models.")
# BUG FIX: the original combined action="store_true" with default=True, which
# makes the flag a no-op — args.torch_compile_fullgraph was True whether or not
# the flag was passed, and there was no way to disable it. Dropping the
# conflicting default restores normal flag semantics (False unless supplied).
parser.add_argument("--torch-compile-fullgraph", action="store_true", help="torch.compile argument for if the model should be compiled into a single graph.")
parser.add_argument("--torch-compile-backend", type=str, default="inductor", help="torch.compile argument for what backend to use. See Pytorch documentation for available backends to choose from.")
# Valid values per torch.compile: 'default', 'reduce-overhead', 'max-autotune'.
parser.add_argument("--torch-compile-mode", type=str, default="default", help="torch.compile argument for what compile mode to use. Options include 'default', 'reduce-overhead', or 'max-autotune'.")
class LatentPreviewMethod(enum.Enum):
|
||||
NoPreviews = "none"
|
||||
Auto = "auto"
|
||||
@ -83,10 +88,8 @@ vram_group.add_argument("--lowvram", action="store_true", help="Split the unet i
|
||||
# Remaining VRAM-strategy flags (vram_group is a mutually-exclusive group
# defined earlier in this file).
vram_group.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
vram_group.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")

# BUG FIX: corrected the misspelling "agressively" -> "aggressively" in the
# user-facing help text. No behavioral change.
parser.add_argument("--disable-smart-memory", action="store_true", help="Force ComfyUI to aggressively offload to regular ram instead of keeping models in vram when it can.")

# Miscellaneous server/runtime toggles.
parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build: Enable convenient things that most people using the standalone windows build will probably enjoy (like auto opening the page on startup).")
@ -273,6 +273,9 @@ class LoadedModel:
|
||||
if xpu_available and not args.disable_ipex_optimize:
|
||||
self.real_model = torch.xpu.optimize(self.real_model.eval(), inplace=True, auto_kernel_selection=True, graph_mode=True)
|
||||
|
||||
if not args.disable_torch_compile:
|
||||
self.real_model = torch.compile(self.real_model, fullgraph=args.torch_compile_fullgraph, backend=args.torch_compile_backend, mode=args.torch_compile_mode)
|
||||
|
||||
return self.real_model
|
||||
|
||||
def model_unload(self):
|
||||
|
||||
Loading…
Reference in New Issue
Block a user