Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-22 12:30:50 +08:00)

Commit bef33564c1: Merge branch 'master' into v3-improvements
comfy/ldm/wan/vae.py

@@ -227,6 +227,7 @@ class Encoder3d(nn.Module):
     def __init__(self,
                  dim=128,
                  z_dim=4,
+                 input_channels=3,
                  dim_mult=[1, 2, 4, 4],
                  num_res_blocks=2,
                  attn_scales=[],
@@ -245,7 +246,7 @@ class Encoder3d(nn.Module):
         scale = 1.0
 
         # init block
-        self.conv1 = CausalConv3d(3, dims[0], 3, padding=1)
+        self.conv1 = CausalConv3d(input_channels, dims[0], 3, padding=1)
 
         # downsample blocks
         downsamples = []
@@ -331,6 +332,7 @@ class Decoder3d(nn.Module):
     def __init__(self,
                  dim=128,
                  z_dim=4,
+                 output_channels=3,
                  dim_mult=[1, 2, 4, 4],
                  num_res_blocks=2,
                  attn_scales=[],
@@ -378,7 +380,7 @@ class Decoder3d(nn.Module):
         # output blocks
         self.head = nn.Sequential(
             RMS_norm(out_dim, images=False), nn.SiLU(),
-            CausalConv3d(out_dim, 3, 3, padding=1))
+            CausalConv3d(out_dim, output_channels, 3, padding=1))
 
     def forward(self, x, feat_cache=None, feat_idx=[0]):
         ## conv1
@@ -449,6 +451,7 @@ class WanVAE(nn.Module):
                  num_res_blocks=2,
                  attn_scales=[],
                  temperal_downsample=[True, True, False],
+                 image_channels=3,
                  dropout=0.0):
         super().__init__()
         self.dim = dim
@@ -460,11 +463,11 @@ class WanVAE(nn.Module):
         self.temperal_upsample = temperal_downsample[::-1]
 
         # modules
-        self.encoder = Encoder3d(dim, z_dim * 2, dim_mult, num_res_blocks,
+        self.encoder = Encoder3d(dim, z_dim * 2, image_channels, dim_mult, num_res_blocks,
                                  attn_scales, self.temperal_downsample, dropout)
         self.conv1 = CausalConv3d(z_dim * 2, z_dim * 2, 1)
         self.conv2 = CausalConv3d(z_dim, z_dim, 1)
-        self.decoder = Decoder3d(dim, z_dim, dim_mult, num_res_blocks,
+        self.decoder = Decoder3d(dim, z_dim, image_channels, dim_mult, num_res_blocks,
                                  attn_scales, self.temperal_upsample, dropout)
 
     def encode(self, x):
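The pattern in these hunks, as a standalone sketch: the channel count of the first and last convolutions moves from a hard-coded 3 to a constructor argument, so the same VAE module can be built for RGB or RGBA video. TinyEncoder below is a hypothetical stand-in, not ComfyUI code.

import torch
import torch.nn as nn

class TinyEncoder(nn.Module):
    # Hypothetical stand-in for Encoder3d; only the first conv is modeled.
    def __init__(self, input_channels=3, dim=16):
        super().__init__()
        # Before the change this was effectively CausalConv3d(3, dim, ...);
        # the input channel count is now configurable.
        self.conv1 = nn.Conv3d(input_channels, dim, 3, padding=1)

    def forward(self, x):
        return self.conv1(x)

enc = TinyEncoder(input_channels=4)   # e.g. an RGBA video VAE
x = torch.randn(1, 4, 5, 32, 32)      # (batch, channels, frames, height, width)
print(enc(x).shape)                   # torch.Size([1, 16, 5, 32, 32])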

comfy/sd.py (24 changed lines)

@@ -321,6 +321,7 @@ class VAE:
         self.latent_channels = 4
         self.latent_dim = 2
         self.output_channels = 3
+        self.pad_channel_value = None
         self.process_input = lambda image: image * 2.0 - 1.0
         self.process_output = lambda image: torch.clamp((image + 1.0) / 2.0, min=0.0, max=1.0)
         self.working_dtypes = [torch.bfloat16, torch.float32]
@@ -435,6 +436,7 @@ class VAE:
             self.memory_used_decode = lambda shape, dtype: (1000 * shape[2] * 2048) * model_management.dtype_size(dtype)
             self.latent_channels = 64
             self.output_channels = 2
+            self.pad_channel_value = "replicate"
             self.upscale_ratio = 2048
             self.downscale_ratio = 2048
             self.latent_dim = 1
@@ -546,7 +548,9 @@ class VAE:
             self.downscale_index_formula = (4, 8, 8)
             self.latent_dim = 3
             self.latent_channels = 16
-            ddconfig = {"dim": dim, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "dropout": 0.0}
+            self.output_channels = sd["encoder.conv1.weight"].shape[1]
+            self.pad_channel_value = 1.0
+            ddconfig = {"dim": dim, "z_dim": self.latent_channels, "dim_mult": [1, 2, 4, 4], "num_res_blocks": 2, "attn_scales": [], "temperal_downsample": [False, True, True], "image_channels": self.output_channels, "dropout": 0.0}
             self.first_stage_model = comfy.ldm.wan.vae.WanVAE(**ddconfig)
             self.working_dtypes = [torch.bfloat16, torch.float16, torch.float32]
             self.memory_used_encode = lambda shape, dtype: (1500 if shape[2]<=4 else 6000) * shape[3] * shape[4] * model_management.dtype_size(dtype)
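Why sd["encoder.conv1.weight"].shape[1] gives the channel count: PyTorch stores convolution weights as (out_channels, in_channels, *kernel_size), so the second dimension of the encoder's first conv weight in the checkpoint records how many image channels the VAE was trained on. A minimal sketch with made-up shapes:

import torch

# Fake checkpoint entry: the encoder's first Conv3d, trained on 4-channel input.
# Conv3d weight layout: (out_channels, in_channels, kT, kH, kW).
sd = {"encoder.conv1.weight": torch.empty(96, 4, 3, 3, 3)}

image_channels = sd["encoder.conv1.weight"].shape[1]
print(image_channels)  # 4 -> this checkpoint expects RGBA input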
@@ -582,6 +586,7 @@ class VAE:
             self.memory_used_decode = lambda shape, dtype: (shape[2] * shape[3] * 87000) * model_management.dtype_size(dtype)
             self.latent_channels = 8
             self.output_channels = 2
+            self.pad_channel_value = "replicate"
             self.upscale_ratio = 4096
             self.downscale_ratio = 4096
             self.latent_dim = 2
@@ -690,9 +695,7 @@ class VAE:
         raise RuntimeError("ERROR: VAE is invalid: None\n\nIf the VAE is from a checkpoint loader node your checkpoint does not contain a valid VAE.")
 
     def vae_encode_crop_pixels(self, pixels):
-        if not self.crop_input:
-            return pixels
-
-        downscale_ratio = self.spacial_compression_encode()
-
-        dims = pixels.shape[1:-1]
+        if self.crop_input:
+            downscale_ratio = self.spacial_compression_encode()
+
+            dims = pixels.shape[1:-1]
@@ -701,6 +704,19 @@ class VAE:
                 x_offset = (dims[d] % downscale_ratio) // 2
                 if x != dims[d]:
                     pixels = pixels.narrow(d + 1, x_offset, x)
 
+        if pixels.shape[-1] > self.output_channels:
+            pixels = pixels[..., :self.output_channels]
+        elif pixels.shape[-1] < self.output_channels:
+            if self.pad_channel_value is not None:
+                if isinstance(self.pad_channel_value, str):
+                    mode = self.pad_channel_value
+                    value = None
+                else:
+                    mode = "constant"
+                    value = self.pad_channel_value
+
+                pixels = torch.nn.functional.pad(pixels, (0, self.output_channels - pixels.shape[-1]), mode=mode, value=value)
         return pixels
 
     def decode_tiled_(self, samples, tile_x=64, tile_y=64, overlap = 16):
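Lifted out of the class for clarity, the channel-adaptation block added above behaves like the standalone sketch below (the real code reads self.output_channels and self.pad_channel_value; here they are plain arguments). A numeric pad_channel_value pads with a constant, as the Wan branch's 1.0 does for a missing channel (presumably an opaque alpha); a string such as "replicate" selects a non-constant padding mode instead.

import torch

def adapt_channels(pixels, output_channels, pad_channel_value):
    # Sketch of the new channel handling in VAE.vae_encode_crop_pixels.
    if pixels.shape[-1] > output_channels:
        # Too many channels: truncate (e.g. RGBA input to an RGB VAE).
        pixels = pixels[..., :output_channels]
    elif pixels.shape[-1] < output_channels and pad_channel_value is not None:
        # Too few channels: pad the last dimension.
        if isinstance(pad_channel_value, str):
            mode, value = pad_channel_value, None   # e.g. "replicate"
        else:
            mode, value = "constant", pad_channel_value
        pixels = torch.nn.functional.pad(
            pixels, (0, output_channels - pixels.shape[-1]), mode=mode, value=value)
    return pixels

rgb = torch.rand(1, 64, 64, 3)        # (batch, height, width, channels)
rgba = adapt_channels(rgb, 4, 1.0)    # RGB into a 4-channel Wan VAE
print(rgba.shape, rgba[..., 3].min()) # torch.Size([1, 64, 64, 4]) tensor(1.)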

nodes.py (4 changed lines)

@@ -343,7 +343,7 @@ class VAEEncode:
     CATEGORY = "latent"
 
     def encode(self, vae, pixels):
-        t = vae.encode(pixels[:,:,:,:3])
+        t = vae.encode(pixels)
         return ({"samples":t}, )
 
 class VAEEncodeTiled:
@@ -361,7 +361,7 @@ class VAEEncodeTiled:
     CATEGORY = "_for_testing"
 
     def encode(self, vae, pixels, tile_size, overlap, temporal_size=64, temporal_overlap=8):
-        t = vae.encode_tiled(pixels[:,:,:,:3], tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap)
+        t = vae.encode_tiled(pixels, tile_x=tile_size, tile_y=tile_size, overlap=overlap, tile_t=temporal_size, overlap_t=temporal_overlap)
         return ({"samples": t}, )
 
 class VAEEncodeForInpaint:
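With the hard-coded [:,:,:,:3] slices removed, both encode nodes now pass the full pixel tensor through to the VAE, and channel handling is centralized in vae_encode_crop_pixels above: extra channels are truncated and missing ones are padded according to the model's pad_channel_value, so an RGB image fed to a 4-channel Wan VAE gets its missing channel filled with 1.0 instead of failing.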

requirements.txt

@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.34.9
-comfyui-workflow-templates==0.7.59
+comfyui-workflow-templates==0.7.60
 comfyui-embedded-docs==0.3.1
 torch
 torchsde