Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-01-30 16:20:17 +08:00
Compare commits
4 Commits
c33ce279c7...b0176429cb
| Author | SHA1 | Date |
|---|---|---|
| | b0176429cb | |
| | 7ac999bf30 | |
| | 0c6b36c6ac | |
| | 6f4d889053 | |
comfy/cli_args.py

```diff
@@ -90,6 +90,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
 parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
+parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
 
 class LatentPreviewMethod(enum.Enum):
     NoPreviews = "none"
```
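The new flag follows stock argparse behavior: `type=float` with `default=0` means the value is a GB count and an omitted flag parses as `0`, which the rest of the change treats as "no cap". A minimal stand-alone sketch; only the `--total-ram` definition comes from the diff, the harness around it is illustrative:

```python
# Minimal argparse harness around the flag added above; everything except
# the --total-ram definition itself is illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--total-ram", type=float, default=0,
                    help="Maximum system RAM visible to comfy in GB (default 0: all)")

print(parser.parse_args(["--total-ram", "16"]).total_ram)  # 16.0 -> cap at 16 GB
print(parser.parse_args([]).total_ram)                     # 0.0  -> use all RAM
```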
comfy/clip_vision.py

```diff
@@ -66,6 +66,7 @@ class ClipVisionModel():
         outputs = Output()
         outputs["last_hidden_state"] = out[0].to(comfy.model_management.intermediate_device())
         outputs["image_embeds"] = out[2].to(comfy.model_management.intermediate_device())
+        outputs["image_sizes"] = [pixel_values.shape[1:]] * pixel_values.shape[0]
         if self.return_all_hidden_states:
             all_hs = out[1].to(comfy.model_management.intermediate_device())
             outputs["penultimate_hidden_states"] = all_hs[:, -2]
```
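The added `image_sizes` entry stores one size tuple per image in the batch: `pixel_values.shape[1:]` drops the batch dimension and the list is repeated `shape[0]` times. A small sketch, assuming the usual `(batch, channels, height, width)` layout for CLIP vision pixel inputs (the concrete numbers are made up):

```python
# Assumed layout: (batch, channels, height, width); values are illustrative.
import torch

pixel_values = torch.zeros(4, 3, 224, 224)
image_sizes = [pixel_values.shape[1:]] * pixel_values.shape[0]
print(len(image_sizes))  # 4 -- one entry per batch element
print(image_sizes[0])    # torch.Size([3, 224, 224])
```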
comfy/model_management.py

```diff
@@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
     if dev is None:
         dev = get_torch_device()
 
-    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
-        mem_total = psutil.virtual_memory().total
+    if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
+        mem_total = 0
+        if args.total_ram != 0:
+            mem_total = args.total_ram * 1024 * 1024
+        else:
+            mem_total = psutil.virtual_memory().total
         mem_total_torch = mem_total
     else:
         if directml_enabled:
```
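For CPU and MPS devices, `get_total_memory` now prefers the user-supplied cap over the physical total reported by `psutil`. In isolation the branch might look like the sketch below; the function name and parameter are hypothetical, and the arithmetic mirrors the diff as written:

```python
import psutil

def capped_total_memory(total_ram_arg: float) -> float:
    # Hypothetical stand-alone version of the CPU/MPS branch above.
    if total_ram_arg != 0:
        return total_ram_arg * 1024 * 1024  # scaling exactly as in the diff
    return psutil.virtual_memory().total    # all physical RAM (bytes)
```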
comfy/model_management.py

```diff
@@ -236,8 +240,15 @@ def mac_version():
     return None
 
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
-total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+
+total_ram = 0
+if args.total_ram != 0:
+    total_ram = args.total_ram * (1024)  # arg in GB
+else:
+    total_ram = psutil.virtual_memory().total / (1024 * 1024)
+logging.info(
+    "Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)
+)
 
 try:
     logging.info("pytorch version: {}".format(torch_version))
```
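The startup log prints MB while the flag is given in GB, hence the single `* 1024` (the diff's own `# arg in GB` comment); the `psutil` byte count is instead divided by `1024 * 1024`. Illustrative arithmetic with a hypothetical 16 GB cap:

```python
# Illustrative only: a 16 GB --total-ram value rendered the way the log
# line's "{:0.0f} MB" format would show it.
total_ram_gb = 16.0
print("Total RAM {:0.0f} MB".format(total_ram_gb * 1024))  # Total RAM 16384 MB
```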
requirements.txt

```diff
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.36.14
-comfyui-workflow-templates==0.8.10
+comfyui-workflow-templates==0.8.11
 comfyui-embedded-docs==0.4.0
 torch
 torchsde
```