Mirror of https://github.com/comfyanonymous/ComfyUI.git · synced 2026-02-12 22:42:36 +08:00
Merge branch 'comfyanonymous:master' into fix/secure-combo
Commit 4a8d87f175
comfy/ldm/modules/attention.py

```diff
@@ -16,11 +16,14 @@ if model_management.xformers_enabled():
     import xformers
     import xformers.ops
 
-# CrossAttn precision handling
-import os
-_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")
-
 from comfy.cli_args import args
+
+# CrossAttn precision handling
+if args.dont_upcast_attention:
+    print("disabling upcasting of attention")
+    _ATTN_PRECISION = "fp16"
+else:
+    _ATTN_PRECISION = "fp32"
 
 def exists(val):
     return val is not None
```
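This hunk inlines the attention-precision decision at import time instead of round-tripping it through an environment variable. For context, `_ATTN_PRECISION` controls whether the query/key similarity in cross-attention is computed in fp32 to avoid fp16 overflow. Below is a minimal sketch of how such a flag is typically consumed downstream; the `attention_scores` helper and the tensor shapes are illustrative assumptions, not ComfyUI's exact code:

```python
import torch

_ATTN_PRECISION = "fp32"  # decided once at import time, as in the hunk above

def attention_scores(q: torch.Tensor, k: torch.Tensor, scale: float) -> torch.Tensor:
    # Upcast q and k before the matmul: fp16 dot products over long sequences
    # can overflow, which typically surfaces as NaN or black outputs.
    if _ATTN_PRECISION == "fp32":
        q, k = q.float(), k.float()
    return torch.einsum('b i d, b j d -> b i j', q, k) * scale

q = torch.randn(1, 77, 64, dtype=torch.float16)
k = torch.randn(1, 77, 64, dtype=torch.float16)
print(attention_scores(q, k, scale=64 ** -0.5).dtype)  # torch.float32
```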
comfy/model_management.py

```diff
@@ -334,19 +334,19 @@ def unload_if_low_vram(model):
     return model
 
 def unet_offload_device():
-    if vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.SHARED:
+    if vram_state == VRAMState.HIGH_VRAM:
         return get_torch_device()
     else:
         return torch.device("cpu")
 
 def text_encoder_offload_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     else:
         return torch.device("cpu")
 
 def text_encoder_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
         if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
@@ -360,7 +360,7 @@ def vae_device():
     return get_torch_device()
 
 def vae_offload_device():
-    if args.gpu_only or vram_state == VRAMState.SHARED:
+    if args.gpu_only:
         return get_torch_device()
     else:
         return torch.device("cpu")
```
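These hunks all drop the `VRAMState.SHARED` special case, so the offload targets now depend only on `HIGH_VRAM` and `--gpu-only`. A standalone sketch of the post-merge routing follows; the reduced enum and the hard-coded `cuda` device stand in for ComfyUI's real `VRAMState` and `get_torch_device()`:

```python
from enum import Enum

import torch

class VRAMState(Enum):
    # Reduced stand-in for ComfyUI's enum, for illustration only.
    NORMAL_VRAM = 0
    HIGH_VRAM = 1
    SHARED = 2

def unet_offload_device(vram_state: VRAMState) -> torch.device:
    # Post-merge behavior: only HIGH_VRAM keeps the UNet on the GPU between
    # runs; SHARED now falls through to the CPU branch like everything else.
    if vram_state == VRAMState.HIGH_VRAM:
        return torch.device("cuda")  # stand-in for get_torch_device()
    return torch.device("cpu")

for state in VRAMState:
    print(state.name, "->", unet_offload_device(state))
```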
main.py (4 changes)

```diff
@@ -14,10 +14,6 @@ if os.name == "nt":
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
 if __name__ == "__main__":
-    if args.dont_upcast_attention:
-        print("disabling upcasting of attention")
-        os.environ['ATTN_PRECISION'] = "fp16"
-
     if args.cuda_device is not None:
         os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda_device)
         print("Set cuda device to:", args.cuda_device)
```
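The net effect across the three files: `main.py` no longer hands the flag to the attention module through `os.environ['ATTN_PRECISION']`; the module reads `args.dont_upcast_attention` directly. A minimal sketch contrasting the two hand-off patterns, with the `Args` class standing in for `comfy.cli_args.args`:

```python
import os

# Old pattern: the launcher mutates the process environment...
os.environ['ATTN_PRECISION'] = "fp16"
# ...and the attention module reads it back later, with a silent fp32 default.
precision_old = os.environ.get("ATTN_PRECISION", "fp32")

class Args:
    # Stand-in for comfy.cli_args.args, for illustration only.
    dont_upcast_attention = True

# New pattern: read the parsed CLI args directly, giving one source of truth
# and no stringly-typed hand-off between modules.
precision_new = "fp16" if Args.dont_upcast_attention else "fp32"
assert precision_old == precision_new
```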