Mirror of https://github.com/comfyanonymous/ComfyUI.git, synced 2026-02-03 02:00:29 +08:00
Merge f1d25a460c into dd86b15521
This commit is contained in: commit eedecee439

240  comfy/image_encoders/dino3.py  Normal file
@@ -0,0 +1,240 @@
|
||||
import math
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
|
||||
from comfy.ldm.modules.attention import optimized_attention_for_device
|
||||
from comfy.ldm.flux.math import apply_rope
|
||||
from comfy.image_encoders.dino2 import Dinov2MLP as DINOv3ViTMLP, LayerScale as DINOv3ViTLayerScale
|
||||
|
||||
class DINOv3ViTAttention(nn.Module):
|
||||
def __init__(self, hidden_size, num_attention_heads, device, dtype, operations):
|
||||
super().__init__()
|
||||
self.embed_dim = hidden_size
|
||||
self.num_heads = num_attention_heads
|
||||
self.head_dim = self.embed_dim // self.num_heads
|
||||
self.is_causal = False
|
||||
|
||||
        self.scaling = self.head_dim**-0.5
|
||||
|
||||
self.k_proj = operations.Linear(self.embed_dim, self.embed_dim, bias=False, device=device, dtype=dtype) # key_bias = False
|
||||
self.v_proj = operations.Linear(self.embed_dim, self.embed_dim, bias=True, device=device, dtype=dtype)
|
||||
|
||||
self.q_proj = operations.Linear(self.embed_dim, self.embed_dim, bias=True, device=device, dtype=dtype)
|
||||
self.o_proj = operations.Linear(self.embed_dim, self.embed_dim, bias=True, device=device, dtype=dtype)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
attention_mask: torch.Tensor | None = None,
|
||||
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
|
||||
**kwargs,
|
||||
) -> tuple[torch.Tensor, torch.Tensor | None]:
|
||||
|
||||
batch_size, patches, _ = hidden_states.size()
|
||||
|
||||
query_states = self.q_proj(hidden_states)
|
||||
key_states = self.k_proj(hidden_states)
|
||||
value_states = self.v_proj(hidden_states)
|
||||
|
||||
query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
|
||||
key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
|
||||
value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
|
||||
|
||||
cos, sin = position_embeddings
|
||||
position_embeddings = torch.stack([cos, sin], dim = -1)
|
||||
query_states, key_states = apply_rope(query_states, key_states, position_embeddings)
|
||||
|
||||
attn_output, attn_weights = optimized_attention_for_device(
|
||||
query_states, key_states, value_states, attention_mask, skip_reshape=True, skip_output_reshape=True
|
||||
)
|
||||
|
||||
attn_output = attn_output.reshape(batch_size, patches, -1).contiguous()
|
||||
attn_output = self.o_proj(attn_output)
|
||||
|
||||
return attn_output, attn_weights
|
||||
|
||||
class DINOv3ViTGatedMLP(nn.Module):
|
||||
def __init__(self, hidden_size, intermediate_size, mlp_bias, device, dtype, operations):
|
||||
super().__init__()
|
||||
self.hidden_size = hidden_size
|
||||
self.intermediate_size = intermediate_size
|
||||
self.gate_proj = operations.Linear(self.hidden_size, self.intermediate_size, bias=mlp_bias, device=device, dtype=dtype)
|
||||
self.up_proj = operations.Linear(self.hidden_size, self.intermediate_size, bias=mlp_bias, device=device, dtype=dtype)
|
||||
self.down_proj = operations.Linear(self.intermediate_size, self.hidden_size, bias=mlp_bias, device=device, dtype=dtype)
|
||||
self.act_fn = torch.nn.GELU()
|
||||
|
||||
def forward(self, x):
|
||||
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
|
||||
return down_proj
|
||||
|
||||
def get_patches_center_coordinates(
|
||||
num_patches_h: int, num_patches_w: int, dtype: torch.dtype, device: torch.device
|
||||
) -> torch.Tensor:
|
||||
|
||||
coords_h = torch.arange(0.5, num_patches_h, dtype=dtype, device=device)
|
||||
coords_w = torch.arange(0.5, num_patches_w, dtype=dtype, device=device)
|
||||
coords_h = coords_h / num_patches_h
|
||||
coords_w = coords_w / num_patches_w
|
||||
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"), dim=-1)
|
||||
coords = coords.flatten(0, 1)
|
||||
coords = 2.0 * coords - 1.0
|
||||
return coords
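# Worked example (sketch): for a 2x2 patch grid the centers are 0.5/2 and 1.5/2 per axis,
# rescaled to [-1, 1], so get_patches_center_coordinates(2, 2, torch.float32, "cpu") returns
#   tensor([[-0.5, -0.5],
#           [-0.5,  0.5],
#           [ 0.5, -0.5],
#           [ 0.5,  0.5]])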
|
||||
|
||||
class DINOv3ViTRopePositionEmbedding(nn.Module):
|
||||
inv_freq: torch.Tensor
|
||||
|
||||
def __init__(self, rope_theta, hidden_size, num_attention_heads, image_size, patch_size, device, dtype):
|
||||
super().__init__()
|
||||
self.base = rope_theta
|
||||
self.head_dim = hidden_size // num_attention_heads
|
||||
self.num_patches_h = image_size // patch_size
|
||||
        self.num_patches_w = image_size // patch_size
        self.patch_size = patch_size  # needed by forward() to recompute the patch grid for arbitrary input sizes
|
||||
|
||||
inv_freq = 1 / self.base ** torch.arange(0, 1, 4 / self.head_dim, dtype=torch.float32, device=device)
|
||||
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
||||
|
||||
def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
|
||||
_, _, height, width = pixel_values.shape
|
||||
num_patches_h = height // self.patch_size
|
||||
num_patches_w = width // self.patch_size
|
||||
|
||||
device = pixel_values.device
|
||||
device_type = device.type if isinstance(device.type, str) and device.type != "mps" else "cpu"
|
||||
with torch.amp.autocast(device_type = device_type, enabled=False):
|
||||
patch_coords = get_patches_center_coordinates(
|
||||
num_patches_h, num_patches_w, dtype=torch.float32, device=device
|
||||
)
|
||||
|
||||
angles = 2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]
|
||||
angles = angles.flatten(1, 2)
|
||||
angles = angles.tile(2)
|
||||
|
||||
cos = torch.cos(angles)
|
||||
sin = torch.sin(angles)
|
||||
|
||||
dtype = pixel_values.dtype
|
||||
return cos.to(dtype=dtype), sin.to(dtype=dtype)
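        # Shape note: patch_coords is [P, 2] with P = num_patches_h * num_patches_w and inv_freq has
        # head_dim // 4 entries, so after flatten(1, 2) and tile(2) both cos and sin are [P, head_dim].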
|
||||
|
||||
|
||||
class DINOv3ViTEmbeddings(nn.Module):
|
||||
def __init__(self, hidden_size, num_register_tokens, num_channels, patch_size, dtype, device, operations):
|
||||
super().__init__()
|
||||
self.cls_token = nn.Parameter(torch.randn(1, 1, hidden_size, device=device, dtype=dtype))
|
||||
self.mask_token = nn.Parameter(torch.zeros(1, 1, hidden_size, device=device, dtype=dtype))
|
||||
self.register_tokens = nn.Parameter(torch.empty(1, num_register_tokens, hidden_size, device=device, dtype=dtype))
|
||||
self.patch_embeddings = operations.Conv2d(
|
||||
num_channels, hidden_size, kernel_size=patch_size, stride=patch_size, device=device, dtype=dtype
|
||||
)
|
||||
|
||||
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: torch.Tensor | None = None):
|
||||
batch_size = pixel_values.shape[0]
|
||||
target_dtype = self.patch_embeddings.weight.dtype
|
||||
|
||||
patch_embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
|
||||
patch_embeddings = patch_embeddings.flatten(2).transpose(1, 2)
|
||||
|
||||
if bool_masked_pos is not None:
|
||||
mask_token = self.mask_token.to(patch_embeddings.dtype)
|
||||
patch_embeddings = torch.where(bool_masked_pos.unsqueeze(-1), mask_token, patch_embeddings)
|
||||
|
||||
cls_token = self.cls_token.expand(batch_size, -1, -1)
|
||||
register_tokens = self.register_tokens.expand(batch_size, -1, -1)
|
||||
embeddings = torch.cat([cls_token, register_tokens, patch_embeddings], dim=1)
|
||||
|
||||
return embeddings
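        # Token layout along dim 1: the CLS token, then num_register_tokens register tokens, then the patch tokens.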
|
||||
|
||||
class DINOv3ViTLayer(nn.Module):
|
||||
|
||||
def __init__(self, hidden_size, layer_norm_eps, use_gated_mlp, layerscale_value, mlp_bias, intermediate_size, num_attention_heads,
|
||||
device, dtype, operations):
|
||||
super().__init__()
|
||||
|
||||
        self.norm1 = operations.LayerNorm(hidden_size, eps=layer_norm_eps, device=device, dtype=dtype)
|
||||
self.attention = DINOv3ViTAttention(hidden_size, num_attention_heads, device=device, dtype=dtype, operations=operations)
|
||||
self.layer_scale1 = DINOv3ViTLayerScale(hidden_size, layerscale_value, device=device, dtype=dtype)
|
||||
|
||||
self.norm2 = operations.LayerNorm(hidden_size, eps=layer_norm_eps, device=device, dtype=dtype)
|
||||
|
||||
if use_gated_mlp:
|
||||
self.mlp = DINOv3ViTGatedMLP(hidden_size, intermediate_size, mlp_bias, device=device, dtype=dtype, operations=operations)
|
||||
else:
|
||||
self.mlp = DINOv3ViTMLP(hidden_size, device=device, dtype=dtype, operations=operations)
|
||||
self.layer_scale2 = DINOv3ViTLayerScale(hidden_size, layerscale_value, device=device, dtype=dtype)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
attention_mask: torch.Tensor | None = None,
|
||||
position_embeddings: tuple[torch.Tensor, torch.Tensor] | None = None,
|
||||
) -> torch.Tensor:
|
||||
residual = hidden_states
|
||||
hidden_states = self.norm1(hidden_states)
|
||||
hidden_states, _ = self.attention(
|
||||
hidden_states,
|
||||
attention_mask=attention_mask,
|
||||
position_embeddings=position_embeddings,
|
||||
)
|
||||
hidden_states = self.layer_scale1(hidden_states)
|
||||
hidden_states = hidden_states + residual
|
||||
|
||||
residual = hidden_states
|
||||
hidden_states = self.norm2(hidden_states)
|
||||
hidden_states = self.mlp(hidden_states)
|
||||
hidden_states = self.layer_scale2(hidden_states)
|
||||
hidden_states = hidden_states + residual
|
||||
|
||||
return hidden_states
|
||||
|
||||
|
||||
class DINOv3ViTModel(nn.Module):
|
||||
def __init__(self, config, device, dtype, operations):
|
||||
super().__init__()
|
||||
num_hidden_layers = config["num_hidden_layers"]
|
||||
hidden_size = config["hidden_size"]
|
||||
num_attention_heads = config["num_attention_heads"]
|
||||
num_register_tokens = config["num_register_tokens"]
|
||||
intermediate_size = config["intermediate_size"]
|
||||
layer_norm_eps = config["layer_norm_eps"]
|
||||
layerscale_value = config["layerscale_value"]
|
||||
num_channels = config["num_channels"]
|
||||
patch_size = config["patch_size"]
|
||||
rope_theta = config["rope_theta"]
|
||||
|
||||
self.embeddings = DINOv3ViTEmbeddings(
|
||||
hidden_size, num_register_tokens, num_channels=num_channels, patch_size=patch_size, dtype=dtype, device=device, operations=operations
|
||||
)
|
||||
self.rope_embeddings = DINOv3ViTRopePositionEmbedding(
|
||||
rope_theta, hidden_size, num_attention_heads, image_size=512, patch_size=patch_size, dtype=dtype, device=device
|
||||
)
|
||||
self.layer = nn.ModuleList(
|
||||
[DINOv3ViTLayer(hidden_size, layer_norm_eps, use_gated_mlp=False, layerscale_value=layerscale_value, mlp_bias=True,
|
||||
                            intermediate_size=intermediate_size, num_attention_heads=num_attention_heads,
|
||||
dtype=dtype, device=device, operations=operations)
|
||||
for _ in range(num_hidden_layers)])
|
||||
self.norm = nn.LayerNorm(hidden_size, eps=layer_norm_eps, dtype=dtype, device=device)
|
||||
|
||||
def get_input_embeddings(self):
|
||||
return self.embeddings.patch_embeddings
|
||||
|
||||
def forward(
|
||||
self,
|
||||
pixel_values: torch.Tensor,
|
||||
bool_masked_pos: torch.Tensor | None = None,
|
||||
**kwargs,
|
||||
):
|
||||
|
||||
pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
|
||||
hidden_states = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
|
||||
position_embeddings = self.rope_embeddings(pixel_values)
|
||||
|
||||
for i, layer_module in enumerate(self.layer):
|
||||
hidden_states = layer_module(
|
||||
hidden_states,
|
||||
position_embeddings=position_embeddings,
|
||||
)
|
||||
|
||||
sequence_output = self.norm(hidden_states)
|
||||
pooled_output = sequence_output[:, 0, :]
|
||||
|
||||
return sequence_output, None, pooled_output, None
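
# Minimal usage sketch (not part of the module): the config is expected to look like
# comfy/image_encoders/dino3_large.json, and `operations` is assumed to be an ops namespace
# providing Linear/Conv2d/LayerNorm (e.g. comfy.ops.disable_weight_init); both are assumptions
# made for illustration only.
#
#   import json, torch
#   import comfy.ops
#   with open("comfy/image_encoders/dino3_large.json") as f:
#       config = json.load(f)
#   model = DINOv3ViTModel(config, device="cpu", dtype=torch.float32,
#                          operations=comfy.ops.disable_weight_init)
#   pixel_values = torch.zeros(1, 3, 512, 512)
#   sequence_output, _, pooled_output, _ = model(pixel_values)
#   # sequence_output: [1, 1 + num_register_tokens + (512 // patch_size) ** 2, hidden_size]
#   # pooled_output:   [1, hidden_size] (the CLS token)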
|
||||
24  comfy/image_encoders/dino3_large.json  Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
|
||||
"hidden_size": 384,
|
||||
"image_size": 224,
|
||||
"initializer_range": 0.02,
|
||||
"intermediate_size": 1536,
|
||||
"key_bias": false,
|
||||
"layer_norm_eps": 1e-05,
|
||||
"layerscale_value": 1.0,
|
||||
"mlp_bias": true,
|
||||
"num_attention_heads": 6,
|
||||
"num_channels": 3,
|
||||
"num_hidden_layers": 12,
|
||||
"num_register_tokens": 4,
|
||||
"patch_size": 16,
|
||||
"pos_embed_rescale": 2.0,
|
||||
"proj_bias": true,
|
||||
"query_bias": true,
|
||||
"rope_theta": 100.0,
|
||||
"use_gated_mlp": false,
|
||||
"value_bias": true,
|
||||
"mean": [0.485, 0.456, 0.406],
|
||||
"std": [0.229, 0.224, 0.225]
|
||||
}
|
||||
194  comfy/ldm/trellis2/attention.py  Normal file
@@ -0,0 +1,194 @@
|
||||
import torch
|
||||
import math
|
||||
from comfy.ldm.modules.attention import optimized_attention
|
||||
from typing import Tuple, Union, List
|
||||
from comfy.ldm.trellis2.vae import VarLenTensor
|
||||
|
||||
def sparse_windowed_scaled_dot_product_self_attention(
|
||||
qkv,
|
||||
window_size: int,
|
||||
shift_window: Tuple[int, int, int] = (0, 0, 0)
|
||||
):
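    # Sparse windowed self-attention: gather tokens into window-contiguous order (fwd_indices),
    # run a varlen / block-diagonal attention kernel per window, then scatter the result back (bwd_indices).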
|
||||
|
||||
serialization_spatial_cache_name = f'windowed_attention_{window_size}_{shift_window}'
|
||||
serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
|
||||
if serialization_spatial_cache is None:
|
||||
fwd_indices, bwd_indices, seq_lens, attn_func_args = calc_window_partition(qkv, window_size, shift_window)
|
||||
qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, attn_func_args))
|
||||
else:
|
||||
fwd_indices, bwd_indices, seq_lens, attn_func_args = serialization_spatial_cache
|
||||
|
||||
qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
|
||||
|
||||
if optimized_attention.__name__ == 'attention_xformers':
|
||||
if 'xops' not in globals():
|
||||
import xformers.ops as xops
|
||||
q, k, v = qkv_feats.unbind(dim=1)
|
||||
q = q.unsqueeze(0) # [1, M, H, C]
|
||||
k = k.unsqueeze(0) # [1, M, H, C]
|
||||
v = v.unsqueeze(0) # [1, M, H, C]
|
||||
out = xops.memory_efficient_attention(q, k, v, **attn_func_args)[0] # [M, H, C]
|
||||
elif optimized_attention.__name__ == 'attention_flash':
|
||||
if 'flash_attn' not in globals():
|
||||
import flash_attn
|
||||
out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, **attn_func_args) # [M, H, C]
|
||||
|
||||
out = out[bwd_indices] # [T, H, C]
|
||||
|
||||
return qkv.replace(out)
|
||||
|
||||
def calc_window_partition(
|
||||
tensor,
|
||||
window_size: Union[int, Tuple[int, ...]],
|
||||
shift_window: Union[int, Tuple[int, ...]] = 0,
|
||||
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
|
||||
|
||||
DIM = tensor.coords.shape[1] - 1
|
||||
shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
|
||||
window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
|
||||
shifted_coords = tensor.coords.clone().detach()
|
||||
shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
|
||||
|
||||
MAX_COORDS = [i + j for i, j in zip(tensor.spatial_shape, shift_window)]
|
||||
NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
|
||||
OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
|
||||
|
||||
shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
|
||||
shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
|
||||
fwd_indices = torch.argsort(shifted_indices)
|
||||
bwd_indices = torch.empty_like(fwd_indices)
|
||||
bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
|
||||
seq_lens = torch.bincount(shifted_indices)
|
||||
mask = seq_lens != 0
|
||||
seq_lens = seq_lens[mask]
|
||||
|
||||
if optimized_attention.__name__ == 'attention_xformers':
|
||||
if 'xops' not in globals():
|
||||
import xformers.ops as xops
|
||||
attn_func_args = {
|
||||
'attn_bias': xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
|
||||
}
|
||||
elif optimized_attention.__name__ == 'attention_flash':
|
||||
attn_func_args = {
|
||||
'cu_seqlens': torch.cat([torch.tensor([0], device=tensor.device), torch.cumsum(seq_lens, dim=0)], dim=0).int(),
|
||||
'max_seqlen': torch.max(seq_lens)
|
||||
}
|
||||
|
||||
return fwd_indices, bwd_indices, seq_lens, attn_func_args
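    # fwd_indices gathers tokens into window-contiguous order; bwd_indices is its inverse
    # (out[bwd_indices] restores the original token order); seq_lens holds the size of each
    # non-empty window; attn_func_args carries backend-specific kwargs (a block-diagonal
    # mask for xformers, cu_seqlens / max_seqlen for flash-attn).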
|
||||
|
||||
|
||||
def sparse_scaled_dot_product_attention(*args, **kwargs):
|
||||
arg_names_dict = {
|
||||
1: ['qkv'],
|
||||
2: ['q', 'kv'],
|
||||
3: ['q', 'k', 'v']
|
||||
}
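    # Dispatch on the total number of arguments: 1 -> packed qkv, 2 -> q + packed kv, 3 -> separate q, k, v.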
|
||||
num_all_args = len(args) + len(kwargs)
|
||||
for key in arg_names_dict[num_all_args][len(args):]:
|
||||
assert key in kwargs, f"Missing argument {key}"
|
||||
|
||||
if num_all_args == 1:
|
||||
qkv = args[0] if len(args) > 0 else kwargs['qkv']
|
||||
device = qkv.device
|
||||
|
||||
s = qkv
|
||||
q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
|
||||
kv_seqlen = q_seqlen
|
||||
qkv = qkv.feats # [T, 3, H, C]
|
||||
|
||||
elif num_all_args == 2:
|
||||
q = args[0] if len(args) > 0 else kwargs['q']
|
||||
kv = args[1] if len(args) > 1 else kwargs['kv']
|
||||
device = q.device
|
||||
|
||||
if isinstance(q, VarLenTensor):
|
||||
s = q
|
||||
q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
|
||||
q = q.feats # [T_Q, H, C]
|
||||
else:
|
||||
s = None
|
||||
N, L, H, C = q.shape
|
||||
q_seqlen = [L] * N
|
||||
q = q.reshape(N * L, H, C) # [T_Q, H, C]
|
||||
|
||||
if isinstance(kv, VarLenTensor):
|
||||
kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
|
||||
kv = kv.feats # [T_KV, 2, H, C]
|
||||
else:
|
||||
N, L, _, H, C = kv.shape
|
||||
kv_seqlen = [L] * N
|
||||
kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
|
||||
|
||||
elif num_all_args == 3:
|
||||
q = args[0] if len(args) > 0 else kwargs['q']
|
||||
k = args[1] if len(args) > 1 else kwargs['k']
|
||||
v = args[2] if len(args) > 2 else kwargs['v']
|
||||
device = q.device
|
||||
|
||||
if isinstance(q, VarLenTensor):
|
||||
s = q
|
||||
q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
|
||||
q = q.feats # [T_Q, H, Ci]
|
||||
else:
|
||||
s = None
|
||||
N, L, H, CI = q.shape
|
||||
q_seqlen = [L] * N
|
||||
q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
|
||||
|
||||
if isinstance(k, VarLenTensor):
|
||||
kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
|
||||
k = k.feats # [T_KV, H, Ci]
|
||||
v = v.feats # [T_KV, H, Co]
|
||||
else:
|
||||
N, L, H, CI, CO = *k.shape, v.shape[-1]
|
||||
kv_seqlen = [L] * N
|
||||
k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
|
||||
v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
|
||||
|
||||
if optimized_attention.__name__ == 'attention_xformers':
|
||||
if 'xops' not in globals():
|
||||
import xformers.ops as xops
|
||||
if num_all_args == 1:
|
||||
q, k, v = qkv.unbind(dim=1)
|
||||
elif num_all_args == 2:
|
||||
k, v = kv.unbind(dim=1)
|
||||
q = q.unsqueeze(0)
|
||||
k = k.unsqueeze(0)
|
||||
v = v.unsqueeze(0)
|
||||
mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
|
||||
out = xops.memory_efficient_attention(q, k, v, mask)[0]
|
||||
elif optimized_attention.__name__ == 'attention_flash':
|
||||
if 'flash_attn' not in globals():
|
||||
import flash_attn
|
||||
cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
|
||||
if num_all_args in [2, 3]:
|
||||
cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
|
||||
if num_all_args == 1:
|
||||
out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
|
||||
elif num_all_args == 2:
|
||||
out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
|
||||
elif num_all_args == 3:
|
||||
out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
|
||||
elif optimized_attention.__name__ == 'flash_attn_3': # TODO
|
||||
if 'flash_attn_3' not in globals():
|
||||
import flash_attn_interface as flash_attn_3
|
||||
cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
|
||||
if num_all_args == 1:
|
||||
q, k, v = qkv.unbind(dim=1)
|
||||
cu_seqlens_kv = cu_seqlens_q.clone()
|
||||
max_q_seqlen = max_kv_seqlen = max(q_seqlen)
|
||||
elif num_all_args == 2:
|
||||
k, v = kv.unbind(dim=1)
|
||||
cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
|
||||
max_q_seqlen = max(q_seqlen)
|
||||
max_kv_seqlen = max(kv_seqlen)
|
||||
elif num_all_args == 3:
|
||||
cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
|
||||
max_q_seqlen = max(q_seqlen)
|
||||
max_kv_seqlen = max(kv_seqlen)
|
||||
out = flash_attn_3.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_q_seqlen, max_kv_seqlen)
|
||||
|
||||
if s is not None:
|
||||
return s.replace(out)
|
||||
else:
|
||||
return out.reshape(N, L, H, -1)
|
||||
149  comfy/ldm/trellis2/cumesh.py  Normal file
@@ -0,0 +1,149 @@
|
||||
# will contain every cuda -> pytorch operation
|
||||
|
||||
import torch
|
||||
from typing import Dict
|
||||
|
||||
|
||||
class TorchHashMap:
|
||||
def __init__(self, keys: torch.Tensor, values: torch.Tensor, default_value: int):
|
||||
device = keys.device
|
||||
# use long for searchsorted
|
||||
self.sorted_keys, order = torch.sort(keys.long())
|
||||
self.sorted_vals = values.long()[order]
|
||||
self.default_value = torch.tensor(default_value, dtype=torch.long, device=device)
|
||||
self._n = self.sorted_keys.numel()
|
||||
|
||||
def lookup_flat(self, flat_keys: torch.Tensor) -> torch.Tensor:
|
||||
flat = flat_keys.long()
|
||||
        idx = torch.searchsorted(self.sorted_keys, flat)
        # clamp the gather index so queries larger than every stored key do not index past the end;
        # such queries still fail the equality test below and fall back to default_value
        safe_idx = idx.clamp(max=self._n - 1)
        found = (idx < self._n) & (self.sorted_keys[safe_idx] == flat)
        out = torch.full((flat.shape[0],), self.default_value, device=flat.device, dtype=self.sorted_vals.dtype)
        if found.any():
            out[found] = self.sorted_vals[safe_idx[found]]
|
||||
return out
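
    # Usage sketch: map flat voxel keys to indices, returning default_value for misses.
    #   keys = torch.tensor([10, 42, 7])
    #   table = TorchHashMap(keys, torch.arange(3), default_value=-1)
    #   table.lookup_flat(torch.tensor([42, 5]))  # -> tensor([ 1, -1])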
|
||||
|
||||
class Voxel:
|
||||
def __init__(
|
||||
self,
|
||||
origin: list,
|
||||
voxel_size: float,
|
||||
coords: torch.Tensor = None,
|
||||
attrs: torch.Tensor = None,
|
||||
layout: Dict = {},
|
||||
device: torch.device = 'cuda'
|
||||
):
|
||||
self.origin = torch.tensor(origin, dtype=torch.float32, device=device)
|
||||
self.voxel_size = voxel_size
|
||||
self.coords = coords
|
||||
self.attrs = attrs
|
||||
self.layout = layout
|
||||
self.device = device
|
||||
|
||||
@property
|
||||
def position(self):
|
||||
return (self.coords + 0.5) * self.voxel_size + self.origin[None, :]
|
||||
|
||||
def split_attrs(self):
|
||||
return {
|
||||
k: self.attrs[:, self.layout[k]]
|
||||
for k in self.layout
|
||||
}
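    # `layout` maps attribute names to column slices of `attrs`, e.g. (hypothetical keys)
    # {"color": slice(0, 3), "normal": slice(3, 6)} splits a [N, 6] attrs tensor into per-attribute views.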
|
||||
|
||||
class Mesh:
|
||||
def __init__(self,
|
||||
vertices,
|
||||
faces,
|
||||
vertex_attrs=None
|
||||
):
|
||||
self.vertices = vertices.float()
|
||||
self.faces = faces.int()
|
||||
self.vertex_attrs = vertex_attrs
|
||||
|
||||
@property
|
||||
def device(self):
|
||||
return self.vertices.device
|
||||
|
||||
def to(self, device, non_blocking=False):
|
||||
return Mesh(
|
||||
self.vertices.to(device, non_blocking=non_blocking),
|
||||
self.faces.to(device, non_blocking=non_blocking),
|
||||
self.vertex_attrs.to(device, non_blocking=non_blocking) if self.vertex_attrs is not None else None,
|
||||
)
|
||||
|
||||
def cuda(self, non_blocking=False):
|
||||
return self.to('cuda', non_blocking=non_blocking)
|
||||
|
||||
def cpu(self):
|
||||
return self.to('cpu')
|
||||
|
||||
# TODO could be an option
|
||||
def fill_holes(self, max_hole_perimeter=3e-2):
|
||||
import cumesh
|
||||
vertices = self.vertices.cuda()
|
||||
faces = self.faces.cuda()
|
||||
|
||||
mesh = cumesh.CuMesh()
|
||||
mesh.init(vertices, faces)
|
||||
mesh.get_edges()
|
||||
mesh.get_boundary_info()
|
||||
if mesh.num_boundaries == 0:
|
||||
return
|
||||
mesh.get_vertex_edge_adjacency()
|
||||
mesh.get_vertex_boundary_adjacency()
|
||||
mesh.get_manifold_boundary_adjacency()
|
||||
mesh.read_manifold_boundary_adjacency()
|
||||
mesh.get_boundary_connected_components()
|
||||
mesh.get_boundary_loops()
|
||||
if mesh.num_boundary_loops == 0:
|
||||
return
|
||||
mesh.fill_holes(max_hole_perimeter=max_hole_perimeter)
|
||||
new_vertices, new_faces = mesh.read()
|
||||
|
||||
self.vertices = new_vertices.to(self.device)
|
||||
self.faces = new_faces.to(self.device)
|
||||
|
||||
# TODO could be an option
|
||||
def simplify(self, target=1000000, verbose: bool=False, options: dict={}):
|
||||
import cumesh
|
||||
vertices = self.vertices.cuda()
|
||||
faces = self.faces.cuda()
|
||||
|
||||
mesh = cumesh.CuMesh()
|
||||
mesh.init(vertices, faces)
|
||||
mesh.simplify(target, verbose=verbose, options=options)
|
||||
new_vertices, new_faces = mesh.read()
|
||||
|
||||
self.vertices = new_vertices.to(self.device)
|
||||
self.faces = new_faces.to(self.device)
|
||||
|
||||
class MeshWithVoxel(Mesh, Voxel):
|
||||
def __init__(self,
|
||||
vertices: torch.Tensor,
|
||||
faces: torch.Tensor,
|
||||
origin: list,
|
||||
voxel_size: float,
|
||||
coords: torch.Tensor,
|
||||
attrs: torch.Tensor,
|
||||
voxel_shape: torch.Size,
|
||||
layout: Dict = {},
|
||||
):
|
||||
self.vertices = vertices.float()
|
||||
self.faces = faces.int()
|
||||
self.origin = torch.tensor(origin, dtype=torch.float32, device=self.device)
|
||||
self.voxel_size = voxel_size
|
||||
self.coords = coords
|
||||
self.attrs = attrs
|
||||
self.voxel_shape = voxel_shape
|
||||
self.layout = layout
|
||||
|
||||
def to(self, device, non_blocking=False):
|
||||
return MeshWithVoxel(
|
||||
self.vertices.to(device, non_blocking=non_blocking),
|
||||
self.faces.to(device, non_blocking=non_blocking),
|
||||
self.origin.tolist(),
|
||||
self.voxel_size,
|
||||
self.coords.to(device, non_blocking=non_blocking),
|
||||
self.attrs.to(device, non_blocking=non_blocking),
|
||||
self.voxel_shape,
|
||||
self.layout,
|
||||
)
|
||||
499  comfy/ldm/trellis2/model.py  Normal file
@@ -0,0 +1,499 @@
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
import torch.nn as nn
|
||||
from comfy.ldm.trellis2.vae import SparseTensor, SparseLinear, sparse_cat, VarLenTensor
|
||||
from typing import Optional, Tuple, Literal, Union, List
|
||||
from comfy.ldm.trellis2.attention import sparse_windowed_scaled_dot_product_self_attention, sparse_scaled_dot_product_attention
|
||||
from comfy.ldm.genmo.joint_model.layers import TimestepEmbedder
|
||||
|
||||
class SparseGELU(nn.GELU):
|
||||
def forward(self, input: VarLenTensor) -> VarLenTensor:
|
||||
return input.replace(super().forward(input.feats))
|
||||
|
||||
class SparseFeedForwardNet(nn.Module):
|
||||
def __init__(self, channels: int, mlp_ratio: float = 4.0):
|
||||
super().__init__()
|
||||
self.mlp = nn.Sequential(
|
||||
SparseLinear(channels, int(channels * mlp_ratio)),
|
||||
SparseGELU(approximate="tanh"),
|
||||
SparseLinear(int(channels * mlp_ratio), channels),
|
||||
)
|
||||
|
||||
def forward(self, x: VarLenTensor) -> VarLenTensor:
|
||||
return self.mlp(x)
|
||||
|
||||
def manual_cast(tensor, dtype):
|
||||
if not torch.is_autocast_enabled():
|
||||
return tensor.type(dtype)
|
||||
return tensor
|
||||
class LayerNorm32(nn.LayerNorm):
|
||||
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
||||
x_dtype = x.dtype
|
||||
x = manual_cast(x, torch.float32)
|
||||
o = super().forward(x)
|
||||
return manual_cast(o, x_dtype)
|
||||
|
||||
|
||||
class SparseMultiHeadRMSNorm(nn.Module):
|
||||
def __init__(self, dim: int, heads: int):
|
||||
super().__init__()
|
||||
self.scale = dim ** 0.5
|
||||
self.gamma = nn.Parameter(torch.ones(heads, dim))
|
||||
|
||||
def forward(self, x: Union[VarLenTensor, torch.Tensor]) -> Union[VarLenTensor, torch.Tensor]:
|
||||
x_type = x.dtype
|
||||
x = x.float()
|
||||
if isinstance(x, VarLenTensor):
|
||||
x = x.replace(F.normalize(x.feats, dim=-1) * self.gamma * self.scale)
|
||||
else:
|
||||
x = F.normalize(x, dim=-1) * self.gamma * self.scale
|
||||
return x.to(x_type)
|
||||
|
||||
# TODO: replace with apply_rope1
|
||||
class SparseRotaryPositionEmbedder(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
head_dim: int,
|
||||
dim: int = 3,
|
||||
rope_freq: Tuple[float, float] = (1.0, 10000.0)
|
||||
):
|
||||
super().__init__()
|
||||
assert head_dim % 2 == 0, "Head dim must be divisible by 2"
|
||||
self.head_dim = head_dim
|
||||
self.dim = dim
|
||||
self.rope_freq = rope_freq
|
||||
self.freq_dim = head_dim // 2 // dim
|
||||
self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
|
||||
self.freqs = rope_freq[0] / (rope_freq[1] ** (self.freqs))
|
||||
|
||||
def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
|
||||
self.freqs = self.freqs.to(indices.device)
|
||||
phases = torch.outer(indices, self.freqs)
|
||||
phases = torch.polar(torch.ones_like(phases), phases)
|
||||
return phases
|
||||
|
||||
def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
|
||||
x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
|
||||
x_rotated = x_complex * phases.unsqueeze(-2)
|
||||
x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
|
||||
return x_embed
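        # Standard RoPE in complex form: adjacent feature pairs are viewed as complex numbers and
        # rotated by the per-position phases, which broadcast over the head dimension.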
|
||||
|
||||
def forward(self, q: SparseTensor, k: Optional[SparseTensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
|
||||
"""
|
||||
Args:
|
||||
q (SparseTensor): [..., N, H, D] tensor of queries
|
||||
k (SparseTensor): [..., N, H, D] tensor of keys
|
||||
"""
|
||||
assert q.coords.shape[-1] == self.dim + 1, "Last dimension of coords must be equal to dim+1"
|
||||
phases_cache_name = f'rope_phase_{self.dim}d_freq{self.rope_freq[0]}-{self.rope_freq[1]}_hd{self.head_dim}'
|
||||
phases = q.get_spatial_cache(phases_cache_name)
|
||||
if phases is None:
|
||||
coords = q.coords[..., 1:]
|
||||
phases = self._get_phases(coords.reshape(-1)).reshape(*coords.shape[:-1], -1)
|
||||
if phases.shape[-1] < self.head_dim // 2:
|
||||
padn = self.head_dim // 2 - phases.shape[-1]
|
||||
phases = torch.cat([phases, torch.polar(
|
||||
torch.ones(*phases.shape[:-1], padn, device=phases.device),
|
||||
torch.zeros(*phases.shape[:-1], padn, device=phases.device)
|
||||
)], dim=-1)
|
||||
q.register_spatial_cache(phases_cache_name, phases)
|
||||
q_embed = q.replace(self._rotary_embedding(q.feats, phases))
|
||||
if k is None:
|
||||
return q_embed
|
||||
k_embed = k.replace(self._rotary_embedding(k.feats, phases))
|
||||
return q_embed, k_embed
|
||||
|
||||
class SparseMultiHeadAttention(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
num_heads: int,
|
||||
ctx_channels: Optional[int] = None,
|
||||
type: Literal["self", "cross"] = "self",
|
||||
attn_mode: Literal["full", "windowed", "double_windowed"] = "full",
|
||||
window_size: Optional[int] = None,
|
||||
shift_window: Optional[Tuple[int, int, int]] = None,
|
||||
qkv_bias: bool = True,
|
||||
use_rope: bool = False,
|
||||
rope_freq: Tuple[int, int] = (1.0, 10000.0),
|
||||
qk_rms_norm: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
self.channels = channels
|
||||
self.head_dim = channels // num_heads
|
||||
self.ctx_channels = ctx_channels if ctx_channels is not None else channels
|
||||
self.num_heads = num_heads
|
||||
self._type = type
|
||||
self.attn_mode = attn_mode
|
||||
self.window_size = window_size
|
||||
self.shift_window = shift_window
|
||||
self.use_rope = use_rope
|
||||
self.qk_rms_norm = qk_rms_norm
|
||||
|
||||
if self._type == "self":
|
||||
self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
|
||||
else:
|
||||
self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
|
||||
self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
|
||||
|
||||
if self.qk_rms_norm:
|
||||
self.q_rms_norm = SparseMultiHeadRMSNorm(self.head_dim, num_heads)
|
||||
self.k_rms_norm = SparseMultiHeadRMSNorm(self.head_dim, num_heads)
|
||||
|
||||
self.to_out = nn.Linear(channels, channels)
|
||||
|
||||
if use_rope:
|
||||
self.rope = SparseRotaryPositionEmbedder(self.head_dim, rope_freq=rope_freq)
|
||||
|
||||
@staticmethod
|
||||
def _linear(module: nn.Linear, x: Union[VarLenTensor, torch.Tensor]) -> Union[VarLenTensor, torch.Tensor]:
|
||||
if isinstance(x, VarLenTensor):
|
||||
return x.replace(module(x.feats))
|
||||
else:
|
||||
return module(x)
|
||||
|
||||
@staticmethod
|
||||
def _reshape_chs(x: Union[VarLenTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[VarLenTensor, torch.Tensor]:
|
||||
if isinstance(x, VarLenTensor):
|
||||
return x.reshape(*shape)
|
||||
else:
|
||||
return x.reshape(*x.shape[:2], *shape)
|
||||
|
||||
def _fused_pre(self, x: Union[VarLenTensor, torch.Tensor], num_fused: int) -> Union[VarLenTensor, torch.Tensor]:
|
||||
if isinstance(x, VarLenTensor):
|
||||
x_feats = x.feats.unsqueeze(0)
|
||||
else:
|
||||
x_feats = x
|
||||
x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
|
||||
return x.replace(x_feats.squeeze(0)) if isinstance(x, VarLenTensor) else x_feats
|
||||
|
||||
def forward(self, x: SparseTensor, context: Optional[Union[VarLenTensor, torch.Tensor]] = None) -> SparseTensor:
|
||||
if self._type == "self":
|
||||
qkv = self._linear(self.to_qkv, x)
|
||||
qkv = self._fused_pre(qkv, num_fused=3)
|
||||
if self.qk_rms_norm or self.use_rope:
|
||||
q, k, v = qkv.unbind(dim=-3)
|
||||
if self.qk_rms_norm:
|
||||
q = self.q_rms_norm(q)
|
||||
k = self.k_rms_norm(k)
|
||||
if self.use_rope:
|
||||
q, k = self.rope(q, k)
|
||||
qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
|
||||
if self.attn_mode == "full":
|
||||
h = sparse_scaled_dot_product_attention(qkv)
|
||||
elif self.attn_mode == "windowed":
|
||||
h = sparse_windowed_scaled_dot_product_self_attention(
|
||||
qkv, self.window_size, shift_window=self.shift_window
|
||||
)
|
||||
elif self.attn_mode == "double_windowed":
|
||||
qkv0 = qkv.replace(qkv.feats[:, :, self.num_heads//2:])
|
||||
qkv1 = qkv.replace(qkv.feats[:, :, :self.num_heads//2])
|
||||
h0 = sparse_windowed_scaled_dot_product_self_attention(
|
||||
qkv0, self.window_size, shift_window=(0, 0, 0)
|
||||
)
|
||||
h1 = sparse_windowed_scaled_dot_product_self_attention(
|
||||
qkv1, self.window_size, shift_window=tuple([self.window_size//2] * 3)
|
||||
)
|
||||
h = qkv.replace(torch.cat([h0.feats, h1.feats], dim=1))
|
||||
else:
|
||||
q = self._linear(self.to_q, x)
|
||||
q = self._reshape_chs(q, (self.num_heads, -1))
|
||||
kv = self._linear(self.to_kv, context)
|
||||
kv = self._fused_pre(kv, num_fused=2)
|
||||
if self.qk_rms_norm:
|
||||
q = self.q_rms_norm(q)
|
||||
k, v = kv.unbind(dim=-3)
|
||||
k = self.k_rms_norm(k)
|
||||
h = sparse_scaled_dot_product_attention(q, k, v)
|
||||
else:
|
||||
h = sparse_scaled_dot_product_attention(q, kv)
|
||||
h = self._reshape_chs(h, (-1,))
|
||||
h = self._linear(self.to_out, h)
|
||||
return h
|
||||
|
||||
class ModulatedSparseTransformerBlock(nn.Module):
|
||||
"""
|
||||
Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
num_heads: int,
|
||||
mlp_ratio: float = 4.0,
|
||||
attn_mode: Literal["full", "swin"] = "full",
|
||||
window_size: Optional[int] = None,
|
||||
shift_window: Optional[Tuple[int, int, int]] = None,
|
||||
use_checkpoint: bool = False,
|
||||
use_rope: bool = False,
|
||||
rope_freq: Tuple[float, float] = (1.0, 10000.0),
|
||||
qk_rms_norm: bool = False,
|
||||
qkv_bias: bool = True,
|
||||
share_mod: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.share_mod = share_mod
|
||||
self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
|
||||
self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
|
||||
self.attn = SparseMultiHeadAttention(
|
||||
channels,
|
||||
num_heads=num_heads,
|
||||
attn_mode=attn_mode,
|
||||
window_size=window_size,
|
||||
shift_window=shift_window,
|
||||
qkv_bias=qkv_bias,
|
||||
use_rope=use_rope,
|
||||
rope_freq=rope_freq,
|
||||
qk_rms_norm=qk_rms_norm,
|
||||
)
|
||||
self.mlp = SparseFeedForwardNet(
|
||||
channels,
|
||||
mlp_ratio=mlp_ratio,
|
||||
)
|
||||
if not share_mod:
|
||||
self.adaLN_modulation = nn.Sequential(
|
||||
nn.SiLU(),
|
||||
nn.Linear(channels, 6 * channels, bias=True)
|
||||
)
|
||||
else:
|
||||
self.modulation = nn.Parameter(torch.randn(6 * channels) / channels ** 0.5)
|
||||
|
||||
def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
|
||||
if self.share_mod:
|
||||
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.modulation + mod).type(mod.dtype).chunk(6, dim=1)
|
||||
else:
|
||||
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
|
||||
h = x.replace(self.norm1(x.feats))
|
||||
h = h * (1 + scale_msa) + shift_msa
|
||||
h = self.attn(h)
|
||||
h = h * gate_msa
|
||||
x = x + h
|
||||
h = x.replace(self.norm2(x.feats))
|
||||
h = h * (1 + scale_mlp) + shift_mlp
|
||||
h = self.mlp(h)
|
||||
h = h * gate_mlp
|
||||
x = x + h
|
||||
return x
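    # adaLN modulation (DiT-style): each residual branch computes
    #   x = x + gate * f((1 + scale) * norm(x) + shift)
    # with (shift, scale, gate) pairs for the attention and MLP branches taken from `mod`.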
|
||||
|
||||
def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
|
||||
if self.use_checkpoint:
|
||||
return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
|
||||
else:
|
||||
return self._forward(x, mod)
|
||||
|
||||
|
||||
class ModulatedSparseTransformerCrossBlock(nn.Module):
|
||||
"""
|
||||
Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
channels: int,
|
||||
ctx_channels: int,
|
||||
num_heads: int,
|
||||
mlp_ratio: float = 4.0,
|
||||
attn_mode: Literal["full", "swin"] = "full",
|
||||
window_size: Optional[int] = None,
|
||||
shift_window: Optional[Tuple[int, int, int]] = None,
|
||||
use_checkpoint: bool = False,
|
||||
use_rope: bool = False,
|
||||
rope_freq: Tuple[float, float] = (1.0, 10000.0),
|
||||
qk_rms_norm: bool = False,
|
||||
qk_rms_norm_cross: bool = False,
|
||||
qkv_bias: bool = True,
|
||||
share_mod: bool = False,
|
||||
|
||||
):
|
||||
super().__init__()
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.share_mod = share_mod
|
||||
self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
|
||||
self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
|
||||
self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
|
||||
self.self_attn = SparseMultiHeadAttention(
|
||||
channels,
|
||||
num_heads=num_heads,
|
||||
type="self",
|
||||
attn_mode=attn_mode,
|
||||
window_size=window_size,
|
||||
shift_window=shift_window,
|
||||
qkv_bias=qkv_bias,
|
||||
use_rope=use_rope,
|
||||
rope_freq=rope_freq,
|
||||
qk_rms_norm=qk_rms_norm,
|
||||
)
|
||||
self.cross_attn = SparseMultiHeadAttention(
|
||||
channels,
|
||||
ctx_channels=ctx_channels,
|
||||
num_heads=num_heads,
|
||||
type="cross",
|
||||
attn_mode="full",
|
||||
qkv_bias=qkv_bias,
|
||||
qk_rms_norm=qk_rms_norm_cross,
|
||||
)
|
||||
self.mlp = SparseFeedForwardNet(
|
||||
channels,
|
||||
mlp_ratio=mlp_ratio,
|
||||
)
|
||||
if not share_mod:
|
||||
self.adaLN_modulation = nn.Sequential(
|
||||
nn.SiLU(),
|
||||
nn.Linear(channels, 6 * channels, bias=True)
|
||||
)
|
||||
else:
|
||||
self.modulation = nn.Parameter(torch.randn(6 * channels) / channels ** 0.5)
|
||||
|
||||
def _forward(self, x: SparseTensor, mod: torch.Tensor, context: Union[torch.Tensor, VarLenTensor]) -> SparseTensor:
|
||||
if self.share_mod:
|
||||
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.modulation + mod).type(mod.dtype).chunk(6, dim=1)
|
||||
else:
|
||||
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
|
||||
h = x.replace(self.norm1(x.feats))
|
||||
h = h * (1 + scale_msa) + shift_msa
|
||||
h = self.self_attn(h)
|
||||
h = h * gate_msa
|
||||
x = x + h
|
||||
h = x.replace(self.norm2(x.feats))
|
||||
h = self.cross_attn(h, context)
|
||||
x = x + h
|
||||
h = x.replace(self.norm3(x.feats))
|
||||
h = h * (1 + scale_mlp) + shift_mlp
|
||||
h = self.mlp(h)
|
||||
h = h * gate_mlp
|
||||
x = x + h
|
||||
return x
|
||||
|
||||
def forward(self, x: SparseTensor, mod: torch.Tensor, context: Union[torch.Tensor, VarLenTensor]) -> SparseTensor:
|
||||
return self._forward(x, mod, context)
|
||||
|
||||
|
||||
class SLatFlowModel(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
resolution: int,
|
||||
in_channels: int,
|
||||
model_channels: int,
|
||||
cond_channels: int,
|
||||
out_channels: int,
|
||||
num_blocks: int,
|
||||
num_heads: Optional[int] = None,
|
||||
num_head_channels: Optional[int] = 64,
|
||||
mlp_ratio: float = 4,
|
||||
pe_mode: Literal["ape", "rope"] = "rope",
|
||||
rope_freq: Tuple[float, float] = (1.0, 10000.0),
|
||||
use_checkpoint: bool = False,
|
||||
share_mod: bool = False,
|
||||
initialization: str = 'vanilla',
|
||||
qk_rms_norm: bool = False,
|
||||
qk_rms_norm_cross: bool = False,
|
||||
dtype = None,
|
||||
device = None,
|
||||
operations = None,
|
||||
):
|
||||
super().__init__()
|
||||
self.resolution = resolution
|
||||
self.in_channels = in_channels
|
||||
self.model_channels = model_channels
|
||||
self.cond_channels = cond_channels
|
||||
self.out_channels = out_channels
|
||||
self.num_blocks = num_blocks
|
||||
self.num_heads = num_heads or model_channels // num_head_channels
|
||||
self.mlp_ratio = mlp_ratio
|
||||
self.pe_mode = pe_mode
|
||||
self.use_checkpoint = use_checkpoint
|
||||
self.share_mod = share_mod
|
||||
self.initialization = initialization
|
||||
self.qk_rms_norm = qk_rms_norm
|
||||
self.qk_rms_norm_cross = qk_rms_norm_cross
|
||||
self.dtype = dtype
|
||||
|
||||
self.t_embedder = TimestepEmbedder(model_channels)
|
||||
if share_mod:
|
||||
self.adaLN_modulation = nn.Sequential(
|
||||
nn.SiLU(),
|
||||
nn.Linear(model_channels, 6 * model_channels, bias=True)
|
||||
)
|
||||
|
||||
self.input_layer = SparseLinear(in_channels, model_channels)
|
||||
|
||||
self.blocks = nn.ModuleList([
|
||||
ModulatedSparseTransformerCrossBlock(
|
||||
model_channels,
|
||||
cond_channels,
|
||||
num_heads=self.num_heads,
|
||||
mlp_ratio=self.mlp_ratio,
|
||||
attn_mode='full',
|
||||
use_checkpoint=self.use_checkpoint,
|
||||
use_rope=(pe_mode == "rope"),
|
||||
rope_freq=rope_freq,
|
||||
share_mod=self.share_mod,
|
||||
qk_rms_norm=self.qk_rms_norm,
|
||||
qk_rms_norm_cross=self.qk_rms_norm_cross,
|
||||
)
|
||||
for _ in range(num_blocks)
|
||||
])
|
||||
|
||||
self.out_layer = SparseLinear(model_channels, out_channels)
|
||||
|
||||
@property
|
||||
def device(self) -> torch.device:
|
||||
return next(self.parameters()).device
|
||||
|
||||
def forward(
|
||||
self,
|
||||
x: SparseTensor,
|
||||
t: torch.Tensor,
|
||||
cond: Union[torch.Tensor, List[torch.Tensor]],
|
||||
concat_cond: Optional[SparseTensor] = None,
|
||||
**kwargs
|
||||
) -> SparseTensor:
|
||||
if concat_cond is not None:
|
||||
x = sparse_cat([x, concat_cond], dim=-1)
|
||||
if isinstance(cond, list):
|
||||
cond = VarLenTensor.from_tensor_list(cond)
|
||||
|
||||
h = self.input_layer(x)
|
||||
h = manual_cast(h, self.dtype)
|
||||
t_emb = self.t_embedder(t)
|
||||
if self.share_mod:
|
||||
t_emb = self.adaLN_modulation(t_emb)
|
||||
t_emb = manual_cast(t_emb, self.dtype)
|
||||
cond = manual_cast(cond, self.dtype)
|
||||
|
||||
if self.pe_mode == "ape":
|
||||
pe = self.pos_embedder(h.coords[:, 1:])
|
||||
h = h + manual_cast(pe, self.dtype)
|
||||
for block in self.blocks:
|
||||
h = block(h, t_emb, cond)
|
||||
|
||||
h = manual_cast(h, x.dtype)
|
||||
h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
|
||||
h = self.out_layer(h)
|
||||
return h
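
# Shape sketch (illustrative, assuming the SparseTensor interface from vae.py as used elsewhere in
# this commit): x is a SparseTensor with feats [N, in_channels] and coords [N, 4] (batch index + xyz),
# t is a [B] timestep tensor, and cond is a [B, L, cond_channels] token tensor (or a list of
# per-sample token tensors). The output is a SparseTensor with feats [N, out_channels] on the
# same coords.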
|
||||
|
||||
class Trellis2(nn.Module):
|
||||
def __init__(self, resolution,
|
||||
in_channels = 32,
|
||||
out_channels = 32,
|
||||
model_channels = 1536,
|
||||
cond_channels = 1024,
|
||||
num_blocks = 30,
|
||||
num_heads = 12,
|
||||
mlp_ratio = 5.3334,
|
||||
share_mod = True,
|
||||
qk_rms_norm = True,
|
||||
qk_rms_norm_cross = True,
|
||||
                 dtype=None, device=None, operations=None):
        super().__init__()
|
||||
args = {
|
||||
"out_channels":out_channels, "num_blocks":num_blocks, "cond_channels" :cond_channels,
|
||||
"model_channels":model_channels, "num_heads":num_heads, "mlp_ratio": mlp_ratio, "share_mod": share_mod,
|
||||
"qk_rms_norm": qk_rms_norm, "qk_rms_norm_cross": qk_rms_norm_cross, "device": device, "dtype": dtype, "operations": operations
|
||||
}
|
||||
# TODO: update the names/checkpoints
|
||||
        self.img2shape = SLatFlowModel(resolution, in_channels=in_channels, **args)
|
||||
        self.shape2txt = SLatFlowModel(resolution, in_channels=in_channels * 2, **args)
|
||||
self.shape_generation = True
|
||||
|
||||
def forward(self, x, timestep, context):
|
||||
pass
|
||||
1185  comfy/ldm/trellis2/vae.py  Normal file
(File diff suppressed because it is too large.)
240  comfy_extras/trellis2.py  Normal file
@@ -0,0 +1,240 @@
|
||||
from typing_extensions import override
|
||||
from comfy_api.latest import ComfyExtension, IO
|
||||
import torch
|
||||
from comfy.ldm.trellis2.model import SparseTensor
|
||||
import comfy.model_management
|
||||
from PIL import Image
|
||||
import PIL
|
||||
import numpy as np
|
||||
|
||||
shape_slat_normalization = {
|
||||
"mean": torch.tensor([
|
||||
0.781296, 0.018091, -0.495192, -0.558457, 1.060530, 0.093252, 1.518149, -0.933218,
|
||||
-0.732996, 2.604095, -0.118341, -2.143904, 0.495076, -2.179512, -2.130751, -0.996944,
|
||||
0.261421, -2.217463, 1.260067, -0.150213, 3.790713, 1.481266, -1.046058, -1.523667,
|
||||
-0.059621, 2.220780, 1.621212, 0.877230, 0.567247, -3.175944, -3.186688, 1.578665
|
||||
])[None],
|
||||
"std": torch.tensor([
|
||||
5.972266, 4.706852, 5.445010, 5.209927, 5.320220, 4.547237, 5.020802, 5.444004,
|
||||
5.226681, 5.683095, 4.831436, 5.286469, 5.652043, 5.367606, 5.525084, 4.730578,
|
||||
4.805265, 5.124013, 5.530808, 5.619001, 5.103930, 5.417670, 5.269677, 5.547194,
|
||||
5.634698, 5.235274, 6.110351, 5.511298, 6.237273, 4.879207, 5.347008, 5.405691
|
||||
])[None]
|
||||
}
|
||||
|
||||
tex_slat_normalization = {
|
||||
"mean": torch.tensor([
|
||||
3.501659, 2.212398, 2.226094, 0.251093, -0.026248, -0.687364, 0.439898, -0.928075,
|
||||
0.029398, -0.339596, -0.869527, 1.038479, -0.972385, 0.126042, -1.129303, 0.455149,
|
||||
-1.209521, 2.069067, 0.544735, 2.569128, -0.323407, 2.293000, -1.925608, -1.217717,
|
||||
1.213905, 0.971588, -0.023631, 0.106750, 2.021786, 0.250524, -0.662387, -0.768862
|
||||
])[None],
|
||||
"std": torch.tensor([
|
||||
2.665652, 2.743913, 2.765121, 2.595319, 3.037293, 2.291316, 2.144656, 2.911822,
|
||||
2.969419, 2.501689, 2.154811, 3.163343, 2.621215, 2.381943, 3.186697, 3.021588,
|
||||
2.295916, 3.234985, 3.233086, 2.260140, 2.874801, 2.810596, 3.292720, 2.674999,
|
||||
2.680878, 2.372054, 2.451546, 2.353556, 2.995195, 2.379849, 2.786195, 2.775190
|
||||
])[None]
|
||||
}
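# These statistics de-normalize the structured latents before VAE decoding: the decode nodes
# below compute samples * std + mean for the shape and texture latents respectively.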
|
||||
|
||||
def smart_crop_square(
|
||||
image: torch.Tensor,
|
||||
background_color=(128, 128, 128),
|
||||
):
|
||||
C, H, W = image.shape
|
||||
size = max(H, W)
|
||||
canvas = torch.empty(
|
||||
(C, size, size),
|
||||
dtype=image.dtype,
|
||||
device=image.device
|
||||
)
|
||||
for c in range(C):
|
||||
canvas[c].fill_(background_color[c])
|
||||
top = (size - H) // 2
|
||||
left = (size - W) // 2
|
||||
canvas[:, top:top + H, left:left + W] = image
|
||||
|
||||
return canvas
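    # Example: a [3, 300, 200] image becomes a [3, 300, 300] canvas filled with background_color,
    # with the original image centered horizontally.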
|
||||
|
||||
def run_conditioning(
|
||||
model,
|
||||
image: torch.Tensor,
|
||||
include_1024: bool = True,
|
||||
background_color: str = "black",
|
||||
):
|
||||
# TODO: should check if normalization was applied in these steps
|
||||
model = model.model
|
||||
device = comfy.model_management.intermediate_device() # replaces .cpu()
|
||||
torch_device = comfy.model_management.get_torch_device() # replaces .cuda()
|
||||
bg_colors = {
|
||||
"black": (0, 0, 0),
|
||||
"gray": (128, 128, 128),
|
||||
"white": (255, 255, 255),
|
||||
}
|
||||
bg_color = bg_colors.get(background_color, (128, 128, 128))
|
||||
|
||||
    # Convert the ComfyUI IMAGE tensor ([B, H, W, C] floats in 0..1) to a [C, H, W] uint8 tensor
    if image.dim() == 4:
        pil_image = (image[0] * 255).clip(0, 255).to(torch.uint8).movedim(-1, 0)
    else:
        pil_image = (image * 255).clip(0, 255).to(torch.uint8).movedim(-1, 0)
|
||||
|
||||
pil_image = smart_crop_square(pil_image, background_color=bg_color)
|
||||
|
||||
model.image_size = 512
|
||||
    def set_image_size(image, image_size=512):
        # `image` here is the square-cropped [C, H, W] uint8 tensor; resize through PIL
        # and return a [1, C, image_size, image_size] float batch on the torch device
        pil = Image.fromarray(image.permute(1, 2, 0).cpu().numpy())
        pil = pil.resize((image_size, image_size), Image.LANCZOS)
        arr = np.array(pil.convert('RGB')).astype(np.float32) / 255
        out = torch.from_numpy(arr).permute(2, 0, 1).float()
        return out.unsqueeze(0).to(torch_device)

    image_512 = set_image_size(pil_image, 512)
    cond_512 = model([image_512])

    cond_1024 = None
    if include_1024:
        model.image_size = 1024
        image_1024 = set_image_size(pil_image, 1024)
        cond_1024 = model([image_1024])
|
||||
|
||||
neg_cond = torch.zeros_like(cond_512)
|
||||
|
||||
conditioning = {
|
||||
'cond_512': cond_512.to(device),
|
||||
'neg_cond': neg_cond.to(device),
|
||||
}
|
||||
if cond_1024 is not None:
|
||||
conditioning['cond_1024'] = cond_1024.to(device)
|
||||
|
||||
    preprocessed_tensor = pil_image.to(torch.float32) / 255.0  # pil_image is the uint8 square crop
    preprocessed_tensor = preprocessed_tensor.unsqueeze(0)
|
||||
|
||||
return conditioning, preprocessed_tensor
|
||||
|
||||
class VaeDecodeShapeTrellis(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="VaeDecodeShapeTrellis",
|
||||
category="latent/3d",
|
||||
inputs=[
|
||||
IO.Latent.Input("samples"),
|
||||
IO.Vae.Input("vae"),
|
||||
IO.Int.Input("resolution", tooltip="Shape Generation Resolution"),
|
||||
],
|
||||
outputs=[
|
||||
IO.Mesh.Output("mesh"),
|
||||
IO.AnyType.Output("shape_subs"),
|
||||
]
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, samples, vae, resolution):
|
||||
std = shape_slat_normalization["std"]
|
||||
mean = shape_slat_normalization["mean"]
|
||||
samples = samples * std + mean
|
||||
|
||||
mesh, subs = vae.decode_shape_slat(resolution, samples)
|
||||
return mesh, subs
|
||||
|
||||
class VaeDecodeTextureTrellis(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="VaeDecodeTextureTrellis",
|
||||
category="latent/3d",
|
||||
inputs=[
|
||||
IO.Latent.Input("samples"),
|
||||
IO.Vae.Input("vae"),
|
||||
IO.AnyType.Input("shape_subs"),
|
||||
],
|
||||
outputs=[
|
||||
IO.Mesh.Output("mesh"),
|
||||
]
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, samples, vae, shape_subs):
|
||||
if shape_subs is None:
|
||||
raise ValueError("Shape subs must be provided for texture generation")
|
||||
|
||||
std = tex_slat_normalization["std"]
|
||||
mean = tex_slat_normalization["mean"]
|
||||
samples = samples * std + mean
|
||||
|
||||
mesh = vae.decode_tex_slat(samples, shape_subs)
|
||||
return mesh
|
||||
|
||||
class Trellis2Conditioning(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="Trellis2Conditioning",
|
||||
category="conditioning/video_models",
|
||||
inputs=[
|
||||
IO.ClipVision.Input("clip_vision_model"),
|
||||
IO.Image.Input("image"),
|
||||
IO.MultiCombo.Input("background_color", options=["black", "gray", "white"], default="black")
|
||||
],
|
||||
outputs=[
|
||||
IO.Conditioning.Output(display_name="positive"),
|
||||
IO.Conditioning.Output(display_name="negative"),
|
||||
]
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, clip_vision_model, image, background_color) -> IO.NodeOutput:
|
||||
# could make 1024 an option
|
||||
conditioning, _ = run_conditioning(clip_vision_model, image, include_1024=True, background_color=background_color)
|
||||
        embeds = conditioning["cond_1024"]  # should add that
        # the extras-dict key for the 1024-res embeds is provisional
        positive = [[conditioning["cond_512"], {"cond_1024": embeds}]]
        negative = [[conditioning["neg_cond"], {"cond_1024": embeds}]]
|
||||
return IO.NodeOutput(positive, negative)
|
||||
|
||||
class EmptyLatentTrellis2(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="EmptyLatentTrellis2",
|
||||
category="latent/3d",
|
||||
inputs=[
|
||||
IO.Int.Input("resolution", default=3072, min=1, max=8192),
|
||||
IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
|
||||
IO.Vae.Input("vae"),
|
||||
IO.Boolean.Input("shape_generation", tooltip="Setting to false will generate texture."),
|
||||
IO.MultiCombo.Input("generation_type", options=["structure_generation", "shape_generation", "texture_generation"])
|
||||
],
|
||||
outputs=[
|
||||
IO.Latent.Output(),
|
||||
]
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def execute(cls, batch_size, coords, vae, generation_type) -> IO.NodeOutput:
|
||||
# TODO: i will probably update how shape/texture is generated
|
||||
# could split this too
|
||||
in_channels = 32
|
||||
shape_generation = generation_type == "shape_generation"
|
||||
device = comfy.model_management.intermediate_device()
|
||||
if shape_generation:
|
||||
latent = SparseTensor(feats=torch.randn(batch_size, in_channels).to(device), coords=coords)
|
||||
else:
|
||||
# coords = shape_slat in txt gen case
|
||||
latent = coords.replace(feats=torch.randn(coords.coords.shape[0], in_channels - coords.feats.shape[1]).to(device))
|
||||
return IO.NodeOutput({"samples": latent, "type": "trellis2"})
|
||||
|
||||
class Trellis2Extension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
return [
|
||||
Trellis2Conditioning,
|
||||
EmptyLatentTrellis2,
|
||||
VaeDecodeTextureTrellis,
|
||||
VaeDecodeShapeTrellis
|
||||
]
|
||||
|
||||
|
||||
async def comfy_entrypoint() -> Trellis2Extension:
|
||||
return Trellis2Extension()
|
||||