Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-13 23:12:35 +08:00)
Command option to set different devices for extensions
commit 6d8fa05f86
parent 3fcab0c642
comfy/cli_args.py:

@@ -43,6 +43,7 @@ parser.add_argument("--input-directory", type=str, default=None, help="Set the C…
 parser.add_argument("--auto-launch", action="store_true", help="Automatically launch ComfyUI in the default browser.")
 parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
 parser.add_argument("--cuda-device", type=int, default=None, metavar="DEVICE_ID", help="Set the id of the cuda device this instance will use.")
+parser.add_argument("--extension-device", type=str, default=None, help="Set the device for extensions in the format 'extension:device;extension:device;...'.")
 cm_group = parser.add_mutually_exclusive_group()
 cm_group.add_argument("--cuda-malloc", action="store_true", help="Enable cudaMallocAsync (enabled by default for torch 2.0 and up).")
 cm_group.add_argument("--disable-cuda-malloc", action="store_true", help="Disable cudaMallocAsync.")
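The new --extension-device flag takes a semicolon-separated list of extension:device pairs. A usage sketch (the extension names here are hypothetical, picked only for illustration):

    # Run the nodes from "SomeNodePack" on CPU and those from
    # "OtherNodePack" on the default CUDA device:
    python main.py --extension-device "SomeNodePack:cpu;OtherNodePack:cuda"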
comfy/model_management.py:

@@ -41,6 +41,12 @@ if args.directml is not None:
     # torch_directml.disable_tiled_resources(True)
     lowvram_available = False #TODO: need to find a way to get free memory in directml before this can be enabled by default.

+extensions_devices = {}
+if args.extension_device is not None:
+    for ext_dev in args.extension_device.split(";"):
+        ext, dev = ext_dev.split(":")
+        extensions_devices[ext] = dev
+
 try:
     import intel_extension_for_pytorch as ipex
     if torch.xpu.is_available():
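One caveat about the parsing above: ext, dev = ext_dev.split(":") splits on every colon, so a device string that itself contains a colon (for example cuda:1) makes the two-value unpacking raise ValueError. A minimal standalone sketch of the same parse, using split(":", 1) to tolerate indexed devices (a suggested variant, not what this commit ships):

    def parse_extension_devices(spec):
        # "extA:cpu;extB:cuda:1" -> {"extA": "cpu", "extB": "cuda:1"}
        devices = {}
        for pair in spec.split(";"):
            ext, dev = pair.split(":", 1)  # split once so "cuda:1" stays intact
            devices[ext] = dev
        return devices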
comfy/model_management.py (continued):

@@ -69,6 +75,12 @@ def is_intel_xpu():
 def get_torch_device():
     global directml_enabled
     global cpu_state
+    global extensions_devices
+
+    extension = comfy.utils.get_extension_calling()
+    if extension is not None and extension in extensions_devices:
+        return torch.device(extensions_devices[extension])
+
     if directml_enabled:
         global directml_device
         return directml_device
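To make the new control flow concrete: get_torch_device() now asks comfy.utils.get_extension_calling() which custom node, if any, is on the call stack, and short-circuits to that node's configured device before any of the usual global device logic runs. A self-contained model of the lookup (the explicit calling_extension parameter, the example mapping, and the fallback are simplifications for illustration):

    import torch

    # Hypothetical mapping, as would be built from --extension-device.
    extensions_devices = {"SomeNodePack": "cpu"}

    def get_torch_device(calling_extension=None):
        # A recognized extension is routed to its configured device...
        if calling_extension is not None and calling_extension in extensions_devices:
            return torch.device(extensions_devices[calling_extension])
        # ...everything else falls through to the global choice.
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")

    print(get_torch_device("SomeNodePack"))  # device(type='cpu')
    print(get_torch_device())                # the global default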
comfy/utils.py:

@@ -4,8 +4,19 @@ import struct
 import comfy.checkpoint_pickle
 import safetensors.torch
 import numpy as np
+import inspect
+import re
 from PIL import Image

+def get_extension_calling():
+    for frame in inspect.stack():
+        if "/custom_nodes/" in frame.filename:
+            stack_module = inspect.getmodule(frame[0])
+            if stack_module:
+                return re.sub(r".*\.?custom_nodes\.([^\.]+).*", r"\1", stack_module.__name__).split(".")[0]
+
+    return None
+
 def load_torch_file(ckpt, safe_load=False, device=None):
     if device is None:
         device = torch.device("cpu")
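get_extension_calling() walks the interpreter stack with inspect.stack(), finds the first frame whose source file sits under a /custom_nodes/ directory, and pulls the extension's package name out of that frame's dotted module path. A short sketch of just the extraction step, run on a few hypothetical module names:

    import re

    # Each of these hypothetical module paths should yield "MyExtension".
    for name in ("custom_nodes.MyExtension.nodes",
                 "custom_nodes.MyExtension",
                 "some_pkg.custom_nodes.MyExtension.sub.module"):
        ext = re.sub(r".*\.?custom_nodes\.([^\.]+).*", r"\1", name).split(".")[0]
        print(name, "->", ext)

The trailing .split(".")[0] is a guard: when the pattern does not match, re.sub returns the module name unchanged and the first dotted component is used instead. Note also that the "/custom_nodes/" substring test assumes forward slashes, so frames whose filenames use backslash separators (as on Windows) would not be detected.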