Compare commits

...

2 Commits

Author SHA1 Message Date
Chakib Benziane
c33ce279c7
Merge 6f4d889053 into 9125613b53 2026-01-16 18:32:25 +01:00
blob42
6f4d889053
feat: add --total-ram option for controlling visible system RAM in Comfy
Adds a new command-line argument `--total-ram` to limit the amount of
system RAM that ComfyUI considers available, allowing users to simulate
lower memory environments. This enables more predictable behavior when
testing or running on systems with limited resources.

Rationale:

I run Comfy inside a Docker container. Using `mem_limit` doesn't hide the
total system RAM from Comfy, so out-of-memory errors can occur easily;
relying on cache pressure limits alone still causes frequent OOM errors.
Adding this flag allows precise control over the memory visible to Comfy.

Signed-off-by: blob42 <contact@blob42.xyz>
2025-11-27 16:29:38 +01:00
2 changed files with 16 additions and 4 deletions

View File

@ -90,6 +90,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
class LatentPreviewMethod(enum.Enum):
NoPreviews = "none"

View File

@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
if dev is None:
dev = get_torch_device()
if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
mem_total = psutil.virtual_memory().total
if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
mem_total = 0
if args.total_ram != 0:
mem_total = args.total_ram * 1024 * 1024
else:
mem_total = psutil.virtual_memory().total
mem_total_torch = mem_total
else:
if directml_enabled:
@ -236,8 +240,15 @@ def mac_version():
return None
total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
total_ram = 0
if args.total_ram != 0:
total_ram = args.total_ram * (1024) # arg in GB
else:
total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info(
"Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)
)
try:
logging.info("pytorch version: {}".format(torch_version))