Merge branch 'master' into dr-support-pip-cm
commit b180f47d0e
@@ -345,9 +345,9 @@ try:
         if torch_version_numeric >= (2, 7): # works on 2.6 but doesn't actually seem to improve much
             if any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]): # TODO: more arches, TODO: gfx950
                 ENABLE_PYTORCH_ATTENTION = True
-        # if torch_version_numeric >= (2, 8):
-        #     if any((a in arch) for a in ["gfx1201"]):
-        #         ENABLE_PYTORCH_ATTENTION = True
+        if rocm_version >= (7, 0):
+            if any((a in arch) for a in ["gfx1201"]):
+                ENABLE_PYTORCH_ATTENTION = True
         if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
             if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx942", "gfx950"]): # TODO: more arches
                 SUPPORT_FP8_OPS = True
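Net effect of this hunk: PyTorch attention is now also enabled for gfx1201 once the ROCm runtime is 7.0 or newer, instead of waiting behind the commented-out torch 2.8 gate. A minimal sketch of the resulting gating follows; should_enable_pytorch_attention and its explicit arguments are hypothetical, since the real code reads arch, torch_version_numeric, and rocm_version from the runtime environment.

def should_enable_pytorch_attention(arch, torch_version, rocm_version):
    # PyTorch >= 2.7 path for the arches that were already enabled before this merge
    if torch_version >= (2, 7) and any((a in arch) for a in ["gfx90a", "gfx942", "gfx1100", "gfx1101", "gfx1151"]):
        return True
    # New in this merge: gfx1201 is gated on ROCm >= 7.0 rather than torch >= 2.8
    if rocm_version >= (7, 0) and any((a in arch) for a in ["gfx1201"]):
        return True
    return False

# e.g. a gfx1201 card on ROCm 7.0 with torch 2.7 now gets PyTorch attention
assert should_enable_pytorch_attention("gfx1201", (2, 7), (7, 0)) is True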
@@ -276,8 +276,13 @@ class VAE:
             if 'decoder.up_blocks.0.resnets.0.norm1.weight' in sd.keys(): #diffusers format
                 sd = diffusers_convert.convert_vae_state_dict(sd)
 
-            self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) #These are for AutoencoderKL and need tweaking (should be lower)
-            self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype)
+            if model_management.is_amd():
+                VAE_KL_MEM_RATIO = 2.73
+            else:
+                VAE_KL_MEM_RATIO = 1.0
+
+            self.memory_used_encode = lambda shape, dtype: (1767 * shape[2] * shape[3]) * model_management.dtype_size(dtype) * VAE_KL_MEM_RATIO #These are for AutoencoderKL and need tweaking (should be lower)
+            self.memory_used_decode = lambda shape, dtype: (2178 * shape[2] * shape[3] * 64) * model_management.dtype_size(dtype) * VAE_KL_MEM_RATIO
             self.downscale_ratio = 8
             self.upscale_ratio = 8
             self.latent_channels = 4
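The VAE hunk keeps the same AutoencoderKL heuristics but scales them by VAE_KL_MEM_RATIO = 2.73 on AMD devices, so encode/decode reserve more memory there. A small worked example of the decode estimate; vae_decode_mem_estimate is a hypothetical standalone helper, and the fp16 and latent-size numbers are only illustrative.

def vae_decode_mem_estimate(latent_shape, dtype_size_bytes, is_amd):
    # Mirrors the lambda above: bytes ~ 2178 * H_latent * W_latent * 64 * dtype_size, scaled up on AMD
    vae_kl_mem_ratio = 2.73 if is_amd else 1.0
    return (2178 * latent_shape[2] * latent_shape[3] * 64) * dtype_size_bytes * vae_kl_mem_ratio

# 512x512 image -> 64x64 latent, decoded in fp16 (2 bytes per element)
shape = (1, 4, 64, 64)
print(vae_decode_mem_estimate(shape, 2, is_amd=False) / (1024 ** 3))  # ~1.06 GiB
print(vae_decode_mem_estimate(shape, 2, is_amd=True) / (1024 ** 3))   # ~2.90 GiB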
@@ -39,7 +39,11 @@ if hasattr(torch.serialization, "add_safe_globals"): # TODO: this was added in
         pass
     ModelCheckpoint.__module__ = "pytorch_lightning.callbacks.model_checkpoint"
 
-    from numpy.core.multiarray import scalar
+    def scalar(*args, **kwargs):
+        from numpy.core.multiarray import scalar as sc
+        return sc(*args, **kwargs)
+    scalar.__module__ = "numpy.core.multiarray"
+
     from numpy import dtype
     from numpy.dtypes import Float64DType
     from _codecs import encode
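The last hunk swaps the eager "from numpy.core.multiarray import scalar" for a wrapper that imports it lazily and forwards the call, then sets __module__ so the allowlisted name still matches what older checkpoints pickled, the same trick used for ModelCheckpoint just above it. A minimal sketch of that pattern, under the assumption that torch.serialization.add_safe_globals keys entries by the callable's __module__ and __name__:

import torch

def scalar(*args, **kwargs):
    # Deferred import: numpy.core.multiarray is only touched if a checkpoint actually needs it
    from numpy.core.multiarray import scalar as sc
    return sc(*args, **kwargs)

# Register the wrapper under the module path that legacy pickles reference
scalar.__module__ = "numpy.core.multiarray"

if hasattr(torch.serialization, "add_safe_globals"):  # present in newer torch releases
    torch.serialization.add_safe_globals([scalar])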