from typing_extensions import override

from comfy_api.latest import ComfyExtension, IO, Types
from comfy.ldm.trellis2.vae import SparseTensor
import comfy.model_management

from PIL import Image
import numpy as np
import triton.language as tl
import logging
import triton
import torch
import scipy.spatial  # import the submodule explicitly; bare "import scipy" does not guarantee scipy.spatial is loaded
import copy

def prepare_trellis_vae_for_decode(vae, sample_shape):
    """Load the Trellis VAE onto the compute device and return how many
    samples fit per decode chunk given the currently free memory."""
    memory_required = vae.memory_used_decode(sample_shape, vae.vae_dtype)
    if len(sample_shape) == 5:
        memory_required *= max(1, int(sample_shape[4]))
    memory_required = max(1, int(memory_required))
    device = comfy.model_management.get_torch_device()
    comfy.model_management.load_models_gpu(
        [vae.patcher],
        memory_required=memory_required,
        force_full_load=getattr(vae, "disable_offload", False),
    )
    free_memory = vae.patcher.get_free_memory(device)
    batch_number = max(1, int(free_memory / memory_required))
    return batch_number


def pack_variable_mesh_batch(vertices, faces, colors=None):
    """Pack lists of variable-size vertex/face tensors into zero-padded
    batch tensors, recording the per-item counts on the returned mesh."""
    batch_size = len(vertices)
    max_vertices = max(v.shape[0] for v in vertices)
    max_faces = max(f.shape[0] for f in faces)

    packed_vertices = vertices[0].new_zeros((batch_size, max_vertices, vertices[0].shape[1]))
    packed_faces = faces[0].new_zeros((batch_size, max_faces, faces[0].shape[1]))
    vertex_counts = torch.tensor([v.shape[0] for v in vertices], device=vertices[0].device, dtype=torch.int64)
    face_counts = torch.tensor([f.shape[0] for f in faces], device=faces[0].device, dtype=torch.int64)

    for i, (v, f) in enumerate(zip(vertices, faces)):
        packed_vertices[i, :v.shape[0]] = v
        packed_faces[i, :f.shape[0]] = f

    mesh = Types.MESH(packed_vertices, packed_faces)
    mesh.vertex_counts = vertex_counts
    mesh.face_counts = face_counts

    if colors is not None:
        max_colors = max(c.shape[0] for c in colors)
        packed_colors = colors[0].new_zeros((batch_size, max_colors, colors[0].shape[1]))
        color_counts = torch.tensor([c.shape[0] for c in colors], device=colors[0].device, dtype=torch.int64)
        for i, c in enumerate(colors):
            packed_colors[i, :c.shape[0]] = c
        mesh.colors = packed_colors
        mesh.color_counts = color_counts

    return mesh
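
# Example (hypothetical shapes): two meshes with 100 and 80 vertices pack into
# [2, 100, 3] vertices with vertex_counts == [100, 80]; get_mesh_batch_item()
# below recovers the unpadded per-item tensors.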


def get_mesh_batch_item(mesh, index):
    """Recover the unpadded vertices/faces/colors of one item from a mesh
    packed by pack_variable_mesh_batch (or from a uniformly batched mesh)."""
    if hasattr(mesh, "vertex_counts"):
        vertex_count = int(mesh.vertex_counts[index].item())
        face_count = int(mesh.face_counts[index].item())
        vertices = mesh.vertices[index, :vertex_count]
        faces = mesh.faces[index, :face_count]
        colors = None
        if hasattr(mesh, "colors") and mesh.colors is not None:
            if hasattr(mesh, "color_counts"):
                color_count = int(mesh.color_counts[index].item())
                colors = mesh.colors[index, :color_count]
            else:
                colors = mesh.colors[index, :vertex_count]
        return vertices, faces, colors

    colors = None
    if hasattr(mesh, "colors") and mesh.colors is not None:
        colors = mesh.colors[index]
    return mesh.vertices[index], mesh.faces[index], colors


# Per-channel mean/std statistics used to de-normalize the 32-channel sparse
# latents before decoding (each tensor has shape [1, 32]).
shape_slat_normalization = {
    "mean": torch.tensor([
        0.781296, 0.018091, -0.495192, -0.558457, 1.060530, 0.093252, 1.518149, -0.933218,
        -0.732996, 2.604095, -0.118341, -2.143904, 0.495076, -2.179512, -2.130751, -0.996944,
        0.261421, -2.217463, 1.260067, -0.150213, 3.790713, 1.481266, -1.046058, -1.523667,
        -0.059621, 2.220780, 1.621212, 0.877230, 0.567247, -3.175944, -3.186688, 1.578665
    ])[None],
    "std": torch.tensor([
        5.972266, 4.706852, 5.445010, 5.209927, 5.320220, 4.547237, 5.020802, 5.444004,
        5.226681, 5.683095, 4.831436, 5.286469, 5.652043, 5.367606, 5.525084, 4.730578,
        4.805265, 5.124013, 5.530808, 5.619001, 5.103930, 5.417670, 5.269677, 5.547194,
        5.634698, 5.235274, 6.110351, 5.511298, 6.237273, 4.879207, 5.347008, 5.405691
    ])[None]
}

tex_slat_normalization = {
    "mean": torch.tensor([
        3.501659, 2.212398, 2.226094, 0.251093, -0.026248, -0.687364, 0.439898, -0.928075,
        0.029398, -0.339596, -0.869527, 1.038479, -0.972385, 0.126042, -1.129303, 0.455149,
        -1.209521, 2.069067, 0.544735, 2.569128, -0.323407, 2.293000, -1.925608, -1.217717,
        1.213905, 0.971588, -0.023631, 0.106750, 2.021786, 0.250524, -0.662387, -0.768862
    ])[None],
    "std": torch.tensor([
        2.665652, 2.743913, 2.765121, 2.595319, 3.037293, 2.291316, 2.144656, 2.911822,
        2.969419, 2.501689, 2.154811, 3.163343, 2.621215, 2.381943, 3.186697, 3.021588,
        2.295916, 3.234985, 3.233086, 2.260140, 2.874801, 2.810596, 3.292720, 2.674999,
        2.680878, 2.372054, 2.451546, 2.353556, 2.995195, 2.379849, 2.786195, 2.775190
    ])[None]
}


def shape_norm(shape_latent, coords):
    """De-normalize a shape latent (x = z * std + mean) and wrap it in a SparseTensor."""
    std = shape_slat_normalization["std"].to(shape_latent)
    mean = shape_slat_normalization["mean"].to(shape_latent)
    samples = SparseTensor(feats=shape_latent, coords=coords)
    samples = samples * std + mean
    return samples
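
# The inverse mapping (presumably applied at encode time, not shown in this
# file) would be z = (x - mean) / std.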


def infer_batched_coord_layout(coords):
    if coords.ndim != 2 or coords.shape[1] != 4:
        raise ValueError(f"Expected Trellis2 coords with shape [N, 4], got {tuple(coords.shape)}")

    if coords.shape[0] == 0:
        raise ValueError("Trellis2 coords can't be empty")

    batch_ids = coords[:, 0].to(torch.int64)
    if (batch_ids < 0).any():
        raise ValueError(f"Trellis2 batch ids must be non-negative, got {batch_ids.unique(sorted=True).tolist()}")
    batch_size = int(batch_ids.max().item()) + 1
    counts = torch.bincount(batch_ids, minlength=batch_size)

    if (counts == 0).any():
        raise ValueError(f"Non-contiguous Trellis2 batch ids in coords: {batch_ids.unique(sorted=True).tolist()}")

    max_tokens = int(counts.max().item())
    return batch_size, counts, max_tokens
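
# Coord layout: each row is [batch_index, x, y, z]. For example, coords
# [[0, 1, 2, 3], [0, 4, 5, 6], [1, 7, 8, 9]] describe batch_size=2 with
# counts=[2, 1] and max_tokens=2.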


def split_batched_coords(coords, coord_counts):
    if coord_counts.ndim != 1:
        raise ValueError(f"Trellis2 coord_counts must be 1D, got shape {tuple(coord_counts.shape)}")
    if (coord_counts < 0).any():
        raise ValueError(f"Trellis2 coord_counts must be non-negative, got {coord_counts.tolist()}")
    if int(coord_counts.sum().item()) != coords.shape[0]:
        raise ValueError(
            f"Trellis2 coord_counts total {int(coord_counts.sum().item())} does not match coords rows {coords.shape[0]}"
        )

    batch_ids = coords[:, 0].to(torch.int64)
    order = torch.argsort(batch_ids, stable=True)
    sorted_coords = coords.index_select(0, order)
    sorted_batch_ids = batch_ids.index_select(0, order)

    offsets = coord_counts.cumsum(0) - coord_counts
    items = []
    for i in range(coord_counts.shape[0]):
        count = int(coord_counts[i].item())
        start = int(offsets[i].item())
        coords_i = sorted_coords[start:start + count]
        ids_i = sorted_batch_ids[start:start + count]
        if coords_i.shape[0] != count or not torch.all(ids_i == i):
            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
        items.append(coords_i)
    return items


def flatten_batched_sparse_latent(samples, coords, coord_counts):
    # [B, C, T, 1] -> [B, T, C], then drop the per-sample padding.
    samples = samples.squeeze(-1).transpose(1, 2)
    if coord_counts is None:
        return samples.reshape(-1, samples.shape[-1]), coords

    coords_items = split_batched_coords(coords, coord_counts)
    feat_list = []
    coord_list = []
    for i, coords_i in enumerate(coords_items):
        count = int(coord_counts[i].item())
        feat_list.append(samples[i, :count])
        coord_list.append(coords_i)

    return torch.cat(feat_list, dim=0), torch.cat(coord_list, dim=0)


def split_batched_sparse_latent(samples, coords, coord_counts):
    samples = samples.squeeze(-1).transpose(1, 2)
    if coord_counts is None:
        return [(samples.reshape(-1, samples.shape[-1]), coords)]

    coords_items = split_batched_coords(coords, coord_counts)
    items = []
    for i, coords_i in enumerate(coords_items):
        count = int(coord_counts[i].item())
        items.append((samples[i, :count], coords_i))
    return items
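
# Example (hypothetical shapes): a padded latent [2, 32, 100, 1] with
# coord_counts=[100, 80] flattens to feats [180, 32] / coords [180, 4], while
# split_batched_sparse_latent yields [(feats_0 [100, 32], coords_0), (feats_1 [80, 32], coords_1)].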


def paint_mesh_with_voxels(mesh, voxel_coords, voxel_colors, resolution):
    """
    Generic function to paint a mesh using nearest-neighbor colors from a sparse voxel field.
    """
    device = comfy.model_management.vae_offload_device()

    origin = torch.tensor([-0.5, -0.5, -0.5], device=device)
    # TODO: generic independent node? if so: figure out how to pass the resolution parameter
    voxel_size = 1.0 / resolution

    # map voxel indices to positions in the [-0.5, 0.5] cube
    voxel_pos = voxel_coords.to(device).float() * voxel_size + origin
    verts = mesh.vertices.to(device).squeeze(0)
    voxel_colors = voxel_colors.to(device)

    voxel_pos_np = voxel_pos.cpu().numpy()
    verts_np = verts.cpu().numpy()

    tree = scipy.spatial.cKDTree(voxel_pos_np)

    # nearest neighbour, k=1
    _, nearest_idx_np = tree.query(verts_np, k=1, workers=-1)

    nearest_idx = torch.from_numpy(nearest_idx_np).long()
    v_colors = voxel_colors[nearest_idx]

    # clamp to [0, 1]
    srgb_colors = v_colors.clamp(0, 1)  # (v_colors * 0.5 + 0.5).clamp(0, 1)

    # to linear RGB (required for GLTF)
    linear_colors = torch.pow(srgb_colors, 2.2)

    final_colors = linear_colors.unsqueeze(0)

    out_mesh = copy.deepcopy(mesh)
    out_mesh.colors = final_colors

    return out_mesh
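
# Note: pow(c, 2.2) above is the common gamma approximation. A sketch of the
# exact sRGB-to-linear transfer function, if more faithful GLTF colors are
# wanted (assumes inputs already clamped to [0, 1]):
#
#   def srgb_to_linear(c):
#       return torch.where(c <= 0.04045, c / 12.92, ((c + 0.055) / 1.055) ** 2.4)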


def paint_mesh_default_colors(mesh):
    # Fallback when no voxels are available: paint every vertex black.
    out_mesh = copy.copy(mesh)
    vertex_count = mesh.vertices.shape[1]
    out_mesh.colors = mesh.vertices.new_zeros((1, vertex_count, 3))
    return out_mesh


class VaeDecodeShapeTrellis(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="VaeDecodeShapeTrellis",
            category="latent/3d",
            inputs=[
                IO.Latent.Input("samples"),
                IO.Vae.Input("vae"),
                IO.Combo.Input("resolution", options=["512", "1024"], default="1024")
            ],
            outputs=[
                IO.Mesh.Output("mesh"),
                IO.AnyType.Output("shape_subs"),
            ]
        )

    @classmethod
    def execute(cls, samples, vae, resolution):
        resolution = int(resolution)
        sample_tensor = samples["samples"]
        device = comfy.model_management.get_torch_device()
        coords = samples["coords"]
        prepare_trellis_vae_for_decode(vae, sample_tensor.shape)
        trellis_vae = vae.first_stage_model
        coord_counts = samples.get("coord_counts")

        samples = samples["samples"]
        if coord_counts is None:
            samples, coords = flatten_batched_sparse_latent(samples, coords, coord_counts)
            samples = shape_norm(samples.to(device), coords.to(device))
            mesh, subs = trellis_vae.decode_shape_slat(samples, resolution)
        else:
            split_items = split_batched_sparse_latent(samples, coords, coord_counts)
            mesh = []
            subs_per_sample = []
            for feats_i, coords_i in split_items:
                coords_i = coords_i.to(device).clone()
                coords_i[:, 0] = 0  # decode each item as its own batch 0
                sample_i = shape_norm(feats_i.to(device), coords_i)
                mesh_i, subs_i = trellis_vae.decode_shape_slat(sample_i, resolution)
                mesh.append(mesh_i[0])
                subs_per_sample.append(subs_i)

            # Re-batch the per-sample subdivision tensors stage by stage.
            subs = []
            for stage_index in range(len(subs_per_sample[0])):
                stage_tensors = [sample_subs[stage_index] for sample_subs in subs_per_sample]
                feats_list = [stage_tensor.feats for stage_tensor in stage_tensors]
                coords_list = [stage_tensor.coords for stage_tensor in stage_tensors]
                subs.append(SparseTensor.from_tensor_list(feats_list, coords_list))

            face_list = [m.faces for m in mesh]
            vert_list = [m.vertices for m in mesh]
            if all(v.shape == vert_list[0].shape for v in vert_list) and all(f.shape == face_list[0].shape for f in face_list):
                mesh = Types.MESH(vertices=torch.stack(vert_list), faces=torch.stack(face_list))
            else:
                mesh = pack_variable_mesh_batch(vert_list, face_list)
        return IO.NodeOutput(mesh, subs)


class VaeDecodeTextureTrellis(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="VaeDecodeTextureTrellis",
            category="latent/3d",
            inputs=[
                IO.Mesh.Input("shape_mesh"),
                IO.Latent.Input("samples"),
                IO.Vae.Input("vae"),
                IO.AnyType.Input("shape_subs"),
                IO.Combo.Input("resolution", options=["512", "1024"], default="1024")
            ],
            outputs=[
                IO.Mesh.Output("mesh"),
            ]
        )

    @classmethod
    def execute(cls, shape_mesh, samples, vae, shape_subs, resolution):
        sample_tensor = samples["samples"]
        resolution = int(resolution)
        device = comfy.model_management.get_torch_device()
        coords = samples["coords"]
        prepare_trellis_vae_for_decode(vae, sample_tensor.shape)
        trellis_vae = vae.first_stage_model
        coord_counts = samples.get("coord_counts")

        samples = samples["samples"]
        samples, coords = flatten_batched_sparse_latent(samples, coords, coord_counts)
        samples = samples.to(device)
        std = tex_slat_normalization["std"].to(samples)
        mean = tex_slat_normalization["mean"].to(samples)
        samples = SparseTensor(feats=samples, coords=coords.to(device))
        samples = samples * std + mean

        voxel = trellis_vae.decode_tex_slat(samples, shape_subs)
        color_feats = voxel.feats[:, :3]
        voxel_coords = voxel.coords[:, 1:]
        voxel_batch_idx = voxel.coords[:, 0]

        mesh_batch_size = shape_mesh.vertices.shape[0]
        if mesh_batch_size > 1:
            out_verts, out_faces, out_colors = [], [], []
            for i in range(mesh_batch_size):
                sel = voxel_batch_idx == i
                item_coords = voxel_coords[sel]
                item_colors = color_feats[sel]
                item_vertices, item_faces, _ = get_mesh_batch_item(shape_mesh, i)
                item_mesh = Types.MESH(vertices=item_vertices.unsqueeze(0), faces=item_faces.unsqueeze(0))
                if item_coords.shape[0] == 0:
                    painted = paint_mesh_default_colors(item_mesh)
                else:
                    painted = paint_mesh_with_voxels(item_mesh, item_coords, item_colors, resolution=resolution)
                out_verts.append(painted.vertices.squeeze(0))
                out_faces.append(painted.faces.squeeze(0))
                out_colors.append(painted.colors.squeeze(0))
            out_mesh = pack_variable_mesh_batch(out_verts, out_faces, out_colors)
        else:
            if voxel_coords.shape[0] == 0:
                out_mesh = paint_mesh_default_colors(shape_mesh)
            else:
                out_mesh = paint_mesh_with_voxels(shape_mesh, voxel_coords, color_feats, resolution=resolution)
        return IO.NodeOutput(out_mesh)


class VaeDecodeStructureTrellis2(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="VaeDecodeStructureTrellis2",
            category="latent/3d",
            inputs=[
                IO.Latent.Input("samples"),
                IO.Vae.Input("vae"),
                IO.Combo.Input("resolution", options=["32", "64"], default="32")
            ],
            outputs=[
                IO.Voxel.Output("structure_output"),
            ]
        )

    @classmethod
    def execute(cls, samples, vae, resolution):
        resolution = int(resolution)
        sample_tensor = samples["samples"]
        sample_tensor = sample_tensor[:, :8]
        batch_number = prepare_trellis_vae_for_decode(vae, sample_tensor.shape)
        decoder = vae.first_stage_model.struct_dec
        load_device = comfy.model_management.get_torch_device()
        decoded_batches = []
        for start in range(0, sample_tensor.shape[0], batch_number):
            sample_chunk = sample_tensor[start:start + batch_number].to(load_device)
            decoded_batches.append(decoder(sample_chunk) > 0)
        decoded = torch.cat(decoded_batches, dim=0)
        current_res = decoded.shape[2]

        if current_res != resolution:
            # Downsample the occupancy grid with max pooling so that any occupied
            # sub-voxel keeps the coarse voxel occupied.
            ratio = current_res // resolution
            decoded = torch.nn.functional.max_pool3d(decoded.float(), ratio, ratio, 0) > 0.5
        out = Types.VOXEL(decoded.squeeze(1).float())
        return IO.NodeOutput(out)


class Trellis2UpsampleCascade(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Trellis2UpsampleCascade",
            category="latent/3d",
            inputs=[
                IO.Latent.Input("shape_latent_512"),
                IO.Vae.Input("vae"),
                IO.Combo.Input("target_resolution", options=["1024", "1536"], default="1024"),
                IO.Int.Input("max_tokens", default=49152, min=1024, max=100000)
            ],
            outputs=[
                IO.AnyType.Output("hr_coords"),
            ]
        )

    @classmethod
    def execute(cls, shape_latent_512, vae, target_resolution, max_tokens):
        device = comfy.model_management.get_torch_device()
        prepare_trellis_vae_for_decode(vae, shape_latent_512["samples"].shape)

        coord_counts = shape_latent_512.get("coord_counts")
        decoder = vae.first_stage_model.shape_dec
        lr_resolution = 512
        target_resolution = int(target_resolution)

        if coord_counts is None:
            feats, coords_512 = flatten_batched_sparse_latent(
                shape_latent_512["samples"],
                shape_latent_512["coords"],
                coord_counts,
            )
            feats = feats.to(device)
            coords_512 = coords_512.to(device)
            slat = shape_norm(feats, coords_512)
            slat.feats = slat.feats.to(next(decoder.parameters()).dtype)
            hr_coords = decoder.upsample(slat, upsample_times=4)

            # Lower the resolution in steps of 128 until the quantized coordinate
            # set fits the token budget (floor: 1024).
            hr_resolution = target_resolution
            while True:
                quant_coords = torch.cat([
                    hr_coords[:, :1],
                    ((hr_coords[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
                ], dim=1)
                final_coords = quant_coords.unique(dim=0)
                num_tokens = final_coords.shape[0]

                if num_tokens < max_tokens or hr_resolution <= 1024:
                    break
                hr_resolution -= 128

            return IO.NodeOutput(final_coords,)

        items = split_batched_sparse_latent(
            shape_latent_512["samples"],
            shape_latent_512["coords"],
            coord_counts,
        )
        decoder_dtype = next(decoder.parameters()).dtype

        sample_hr_coords = []
        for feats_i, coords_i in items:
            feats_i = feats_i.to(device)
            coords_i = coords_i.to(device).clone()
            coords_i[:, 0] = 0
            slat_i = shape_norm(feats_i, coords_i)
            slat_i.feats = slat_i.feats.to(decoder_dtype)
            sample_hr_coords.append(decoder.upsample(slat_i, upsample_times=4))

        # Pick one shared resolution that keeps every sample under the budget.
        hr_resolution = target_resolution
        while True:
            exceeds_limit = False
            for hr_coords_i in sample_hr_coords:
                quant_coords_i = torch.cat([
                    hr_coords_i[:, :1],
                    ((hr_coords_i[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
                ], dim=1)
                if quant_coords_i.unique(dim=0).shape[0] >= max_tokens:
                    exceeds_limit = True
                    break
            if not exceeds_limit or hr_resolution <= 1024:
                break
            hr_resolution -= 128

        final_coords_list = []
        output_coord_counts = []
        for sample_offset, hr_coords_i in enumerate(sample_hr_coords):
            quant_coords_i = torch.cat([
                hr_coords_i[:, :1],
                ((hr_coords_i[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
            ], dim=1)
            final_coords_i = quant_coords_i.unique(dim=0)
            final_coords_i = final_coords_i.clone()
            final_coords_i[:, 0] = sample_offset
            final_coords_list.append(final_coords_i)
            output_coord_counts.append(int(final_coords_i.shape[0]))

        output = {
            "coords": torch.cat(final_coords_list, dim=0),
            "coord_counts": torch.tensor(output_coord_counts, dtype=torch.int64),
            "resolutions": torch.full((len(final_coords_list),), int(hr_resolution), dtype=torch.int64),
        }

        return IO.NodeOutput(output,)
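
# For example, with target_resolution=1536 and the default max_tokens=49152,
# the loop tries 1536, 1408, 1280, ... and stops at the first resolution whose
# deduplicated coordinate count fits the budget, or at the 1024 floor.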


# Standard ImageNet normalization statistics, as used by DINO-style image encoders.
dino_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
dino_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)


def run_conditioning(model, cropped_pil_img, include_1024=True):
    """Encode a cropped PIL image with the vision model at 512 (and optionally 1024) resolution."""
    model_internal = model.model
    device = comfy.model_management.intermediate_device()
    torch_device = comfy.model_management.get_torch_device()

    def prepare_tensor(pil_img, size):
        resized_pil = pil_img.resize((size, size), Image.Resampling.LANCZOS)
        img_np = np.array(resized_pil).astype(np.float32) / 255.0
        img_t = torch.from_numpy(img_np).permute(2, 0, 1).unsqueeze(0).to(torch_device)
        return (img_t - dino_mean.to(torch_device)) / dino_std.to(torch_device)

    model_internal.image_size = 512
    input_512 = prepare_tensor(cropped_pil_img, 512)
    cond_512 = model_internal(input_512, skip_norm_elementwise=True)[0]

    cond_1024 = None
    if include_1024:
        model_internal.image_size = 1024
        input_1024 = prepare_tensor(cropped_pil_img, 1024)
        cond_1024 = model_internal(input_1024, skip_norm_elementwise=True)[0]

    conditioning = {
        'cond_512': cond_512.to(device),
        'neg_cond': torch.zeros_like(cond_512).to(device),
    }
    if cond_1024 is not None:
        conditioning['cond_1024'] = cond_1024.to(device)

    return conditioning


class Trellis2Conditioning(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="Trellis2Conditioning",
            category="conditioning/video_models",
            inputs=[
                IO.ClipVision.Input("clip_vision_model"),
                IO.Image.Input("image"),
                IO.Mask.Input("mask"),
                IO.Combo.Input("background_color", options=["black", "gray", "white"], default="black")
            ],
            outputs=[
                IO.Conditioning.Output(display_name="positive"),
                IO.Conditioning.Output(display_name="negative"),
            ]
        )

    @classmethod
    def execute(cls, clip_vision_model, image, mask, background_color) -> IO.NodeOutput:
        # Normalize to batched form so the per-image conditioning loop below is uniform.
        if image.ndim == 3:
            image = image.unsqueeze(0)
        if mask.ndim == 2:
            mask = mask.unsqueeze(0)
        batch_size = image.shape[0]
        if mask.shape[0] == 1 and batch_size > 1:
            mask = mask.expand(batch_size, -1, -1)
        elif mask.shape[0] != batch_size:
            raise ValueError(f"Trellis2Conditioning mask batch {mask.shape[0]} does not match image batch {batch_size}")

        cond_512_list = []
        cond_1024_list = []

        for b in range(batch_size):
            item_image = image[b]
            item_mask = mask[b] if mask.size(0) > 1 else mask[0]

            img_np = (item_image.cpu().numpy() * 255).clip(0, 255).astype(np.uint8)
            mask_np = (item_mask.cpu().numpy() * 255).clip(0, 255).astype(np.uint8)

            pil_img = Image.fromarray(img_np)
            pil_mask = Image.fromarray(mask_np)

            # Limit the longest side to 1024 pixels before cropping.
            max_size = max(pil_img.size)
            scale = min(1.0, 1024 / max_size)
            if scale < 1.0:
                new_w, new_h = int(pil_img.width * scale), int(pil_img.height * scale)
                pil_img = pil_img.resize((new_w, new_h), Image.Resampling.LANCZOS)
                pil_mask = pil_mask.resize((new_w, new_h), Image.Resampling.NEAREST)

            rgba_np = np.zeros((pil_img.height, pil_img.width, 4), dtype=np.uint8)
            rgba_np[:, :, :3] = np.array(pil_img)
            rgba_np[:, :, 3] = np.array(pil_mask)

            # Square crop centered on the bounding box of the masked region.
            alpha = rgba_np[:, :, 3]
            bbox_coords = np.argwhere(alpha > 0.8 * 255)

            if len(bbox_coords) > 0:
                y_min, x_min = np.min(bbox_coords[:, 0]), np.min(bbox_coords[:, 1])
                y_max, x_max = np.max(bbox_coords[:, 0]), np.max(bbox_coords[:, 1])

                center_y, center_x = (y_min + y_max) / 2.0, (x_min + x_max) / 2.0
                size = max(y_max - y_min, x_max - x_min)

                crop_x1 = int(center_x - size // 2)
                crop_y1 = int(center_y - size // 2)
                crop_x2 = int(center_x + size // 2)
                crop_y2 = int(center_y + size // 2)

                rgba_pil = Image.fromarray(rgba_np)
                cropped_rgba = rgba_pil.crop((crop_x1, crop_y1, crop_x2, crop_y2))
                cropped_np = np.array(cropped_rgba).astype(np.float32) / 255.0
            else:
                logging.warning("Mask for the image is empty. Trellis2 requires an image with a mask for the best mesh quality.")
                cropped_np = rgba_np.astype(np.float32) / 255.0

            bg_colors = {"black": [0.0, 0.0, 0.0], "gray": [0.5, 0.5, 0.5], "white": [1.0, 1.0, 1.0]}
            bg_rgb = np.array(bg_colors.get(background_color, [0.0, 0.0, 0.0]), dtype=np.float32)

            # Alpha-composite the crop over the chosen background color.
            fg = cropped_np[:, :, :3]
            alpha_float = cropped_np[:, :, 3:4]
            composite_np = fg * alpha_float + bg_rgb * (1.0 - alpha_float)

            # to match the trellis2 reference code (quantize -> dequantize)
            composite_uint8 = (composite_np * 255.0).round().clip(0, 255).astype(np.uint8)

            cropped_pil = Image.fromarray(composite_uint8)

            item_conditioning = run_conditioning(clip_vision_model, cropped_pil, include_1024=True)
            cond_512_list.append(item_conditioning["cond_512"])
            cond_1024_list.append(item_conditioning["cond_1024"])

        cond_512_batched = torch.cat(cond_512_list, dim=0)
        cond_1024_batched = torch.cat(cond_1024_list, dim=0)
        neg_cond_batched = torch.zeros_like(cond_512_batched)
        neg_embeds_batched = torch.zeros_like(cond_1024_batched)

        positive = [[cond_512_batched, {"embeds": cond_1024_batched}]]
        negative = [[neg_cond_batched, {"embeds": neg_embeds_batched}]]
        return IO.NodeOutput(positive, negative)


class EmptyTrellis2ShapeLatent(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="EmptyTrellis2ShapeLatent",
            category="latent/3d",
            inputs=[
                IO.AnyType.Input("structure_or_coords"),
                IO.Model.Input("model"),
            ],
            outputs=[
                IO.Latent.Output(),
                IO.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, structure_or_coords, model):
        # Accept either a dense structure voxel grid or already-upsampled coords.
        is_512_pass = False

        if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4:
            decoded = structure_or_coords.data.unsqueeze(1)
            coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int()
            is_512_pass = True
        elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2:
            coords = structure_or_coords.int()
            is_512_pass = False
        else:
            raise ValueError(f"Invalid input to EmptyTrellis2ShapeLatent: {type(structure_or_coords)}")

        batch_size, counts, max_tokens = infer_batched_coord_layout(coords)
        in_channels = 32
        # image-like layout [B, C, T, 1]; T is padded to the largest token count
        latent = torch.zeros(batch_size, in_channels, max_tokens, 1)
        model = model.clone()
        model.model_options = model.model_options.copy()
        if "transformer_options" in model.model_options:
            model.model_options["transformer_options"] = model.model_options["transformer_options"].copy()
        else:
            model.model_options["transformer_options"] = {}

        model.model_options["transformer_options"]["coords"] = coords
        if is_512_pass:
            model.model_options["transformer_options"]["generation_mode"] = "shape_generation_512"
        else:
            model.model_options["transformer_options"]["generation_mode"] = "shape_generation"
        # key name matches what the decode nodes read via samples.get("coord_counts")
        return IO.NodeOutput({"samples": latent, "coords": coords, "coord_counts": counts, "type": "trellis2"}, model)


class EmptyTextureLatentTrellis2(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="EmptyTextureLatentTrellis2",
            category="latent/3d",
            inputs=[
                IO.Voxel.Input("structure_or_coords"),
                IO.Latent.Input("shape_latent"),
                IO.Model.Input("model"),
            ],
            outputs=[
                IO.Latent.Output(),
                IO.Model.Output()
            ]
        )

    @classmethod
    def execute(cls, structure_or_coords, shape_latent, model):
        channels = 32
        if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4:
            decoded = structure_or_coords.data.unsqueeze(1)
            coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int()
        elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2:
            coords = structure_or_coords.int()
        else:
            raise ValueError(f"Invalid input to EmptyTextureLatentTrellis2: {type(structure_or_coords)}")

        batch_size, counts, max_tokens = infer_batched_coord_layout(coords)

        shape_latent = shape_latent["samples"]
        if shape_latent.ndim == 4:
            shape_latent = shape_latent.squeeze(-1).transpose(1, 2).reshape(-1, channels)

        latent = torch.zeros(batch_size, channels, max_tokens, 1)
        model = model.clone()
        model.model_options = model.model_options.copy()
        if "transformer_options" in model.model_options:
            model.model_options["transformer_options"] = model.model_options["transformer_options"].copy()
        else:
            model.model_options["transformer_options"] = {}

        model.model_options["transformer_options"]["coords"] = coords
        model.model_options["transformer_options"]["generation_mode"] = "texture_generation"
        model.model_options["transformer_options"]["shape_slat"] = shape_latent
        return IO.NodeOutput({"samples": latent, "coords": coords, "coord_counts": counts, "type": "trellis2"}, model)


class EmptyTrellis2LatentStructure(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="EmptyStructureLatentTrellis2",
            category="latent/3d",
            inputs=[
                IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."),
            ],
            outputs=[
                IO.Latent.Output(),
            ]
        )

    @classmethod
    def execute(cls, batch_size):
        in_channels = 8
        resolution = 16
        latent = torch.zeros(batch_size, in_channels, resolution, resolution, resolution)
        output = {
            "samples": latent,
            "type": "trellis2",
        }
        return IO.NodeOutput(output)


@triton.jit
def qem_edge_errors_kernel(
    verts_ptr, Q_ptr, edges_ptr, optimal_ptr, error_ptr, wander_ptr,
    n_edges, stabilizer, max_edge_length_sq, mesh_scale_sq,
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offs < n_edges

    # Edge endpoints
    va = tl.load(edges_ptr + offs * 2, mask=mask, other=0).to(tl.int64)
    vb = tl.load(edges_ptr + offs * 2 + 1, mask=mask, other=0).to(tl.int64)

    vax = tl.load(verts_ptr + va * 3 + 0, mask=mask, other=0.0)
    vay = tl.load(verts_ptr + va * 3 + 1, mask=mask, other=0.0)
    vaz = tl.load(verts_ptr + va * 3 + 2, mask=mask, other=0.0)
    vbx = tl.load(verts_ptr + vb * 3 + 0, mask=mask, other=0.0)
    vby = tl.load(verts_ptr + vb * 3 + 1, mask=mask, other=0.0)
    vbz = tl.load(verts_ptr + vb * 3 + 2, mask=mask, other=0.0)

    ex = vbx - vax
    ey = vby - vay
    ez = vbz - vaz
    el_sq = ex * ex + ey * ey + ez * ez
    el = tl.sqrt(el_sq)

    # Edge quadric Qe = Qa + Qb, loaded element-wise (4x4, row-major).
    Qa_base = Q_ptr + va * 16
    Qb_base = Q_ptr + vb * 16

    qe0 = tl.load(Qa_base + 0, mask=mask, other=0.0) + tl.load(Qb_base + 0, mask=mask, other=0.0)
    qe1 = tl.load(Qa_base + 1, mask=mask, other=0.0) + tl.load(Qb_base + 1, mask=mask, other=0.0)
    qe2 = tl.load(Qa_base + 2, mask=mask, other=0.0) + tl.load(Qb_base + 2, mask=mask, other=0.0)
    qe3 = tl.load(Qa_base + 3, mask=mask, other=0.0) + tl.load(Qb_base + 3, mask=mask, other=0.0)
    qe4 = tl.load(Qa_base + 4, mask=mask, other=0.0) + tl.load(Qb_base + 4, mask=mask, other=0.0)
    qe5 = tl.load(Qa_base + 5, mask=mask, other=0.0) + tl.load(Qb_base + 5, mask=mask, other=0.0)
    qe6 = tl.load(Qa_base + 6, mask=mask, other=0.0) + tl.load(Qb_base + 6, mask=mask, other=0.0)
    qe7 = tl.load(Qa_base + 7, mask=mask, other=0.0) + tl.load(Qb_base + 7, mask=mask, other=0.0)
    qe8 = tl.load(Qa_base + 8, mask=mask, other=0.0) + tl.load(Qb_base + 8, mask=mask, other=0.0)
    qe9 = tl.load(Qa_base + 9, mask=mask, other=0.0) + tl.load(Qb_base + 9, mask=mask, other=0.0)
    qe10 = tl.load(Qa_base + 10, mask=mask, other=0.0) + tl.load(Qb_base + 10, mask=mask, other=0.0)
    qe11 = tl.load(Qa_base + 11, mask=mask, other=0.0) + tl.load(Qb_base + 11, mask=mask, other=0.0)
    qe12 = tl.load(Qa_base + 12, mask=mask, other=0.0) + tl.load(Qb_base + 12, mask=mask, other=0.0)
    qe13 = tl.load(Qa_base + 13, mask=mask, other=0.0) + tl.load(Qb_base + 13, mask=mask, other=0.0)
    qe14 = tl.load(Qa_base + 14, mask=mask, other=0.0) + tl.load(Qb_base + 14, mask=mask, other=0.0)
    qe15 = tl.load(Qa_base + 15, mask=mask, other=0.0) + tl.load(Qb_base + 15, mask=mask, other=0.0)

    # Regularized upper-left 3x3 block A and right-hand side b = -Qe[:3, 3]
    a11 = qe0 + stabilizer
    a12 = qe1
    a13 = qe2
    a21 = qe4
    a22 = qe5 + stabilizer
    a23 = qe6
    a31 = qe8
    a32 = qe9
    a33 = qe10 + stabilizer

    b1 = -qe3
    b2 = -qe7
    b3 = -qe11

    # Solve A v = b with Cramer's rule
    det = (a11 * (a22 * a33 - a23 * a32)
           - a12 * (a21 * a33 - a23 * a31)
           + a13 * (a21 * a32 - a22 * a31))

    det_good = tl.abs(det) > 1e-12

    det_x = (b1 * (a22 * a33 - a23 * a32)
             - a12 * (b2 * a33 - a23 * b3)
             + a13 * (b2 * a32 - a22 * b3))
    det_y = (a11 * (b2 * a33 - a23 * b3)
             - b1 * (a21 * a33 - a23 * a31)
             + a13 * (a21 * b3 - b2 * a31))
    det_z = (a11 * (a22 * b3 - b2 * a32)
             - a12 * (a21 * b3 - b2 * a31)
             + b1 * (a21 * a32 - a22 * a31))

    # Fall back to the edge midpoint when the system is near-singular
    ox = tl.where(det_good, det_x / det, (vax + vbx) * 0.5)
    oy = tl.where(det_good, det_y / det, (vay + vby) * 0.5)
    oz = tl.where(det_good, det_z / det, (vaz + vbz) * 0.5)

    # Reject solutions that wander more than 4 edge lengths from either endpoint
    dist_a_sq = (ox - vax) * (ox - vax) + (oy - vay) * (oy - vay) + (oz - vaz) * (oz - vaz)
    dist_b_sq = (ox - vbx) * (ox - vbx) + (oy - vby) * (oy - vby) + (oz - vbz) * (oz - vbz)
    wander_thresh = 16.0 * el_sq
    wander_bad = (dist_a_sq > wander_thresh) | (dist_b_sq > wander_thresh)

    ox = tl.where(wander_bad & (el > 0.0), (vax + vbx) * 0.5, ox)
    oy = tl.where(wander_bad & (el > 0.0), (vay + vby) * 0.5, oy)
    oz = tl.where(wander_bad & (el > 0.0), (vaz + vbz) * 0.5, oz)

    # Quadric error err = v^T Qe v with v = (ox, oy, oz, 1)
    v4_0 = ox
    v4_1 = oy
    v4_2 = oz
    v4_3 = 1.0

    qv0 = qe0 * v4_0 + qe1 * v4_1 + qe2 * v4_2 + qe3 * v4_3
    qv1 = qe4 * v4_0 + qe5 * v4_1 + qe6 * v4_2 + qe7 * v4_3
    qv2 = qe8 * v4_0 + qe9 * v4_1 + qe10 * v4_2 + qe11 * v4_3
    qv3 = qe12 * v4_0 + qe13 * v4_1 + qe14 * v4_2 + qe15 * v4_3

    err = tl.abs(v4_0 * qv0 + v4_1 * qv1 + v4_2 * qv2 + v4_3 * qv3)

    tl.store(optimal_ptr + offs * 3 + 0, ox, mask=mask)
    tl.store(optimal_ptr + offs * 3 + 1, oy, mask=mask)
    tl.store(optimal_ptr + offs * 3 + 2, oz, mask=mask)
    tl.store(error_ptr + offs, err, mask=mask)
    tl.store(wander_ptr + offs, wander_bad.to(tl.int32), mask=mask)
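
# QEM background: each vertex carries a 4x4 error quadric Q. Collapsing edge
# (a, b) places the merged vertex at the v = (x, y, z, 1) minimizing
# v^T (Qa + Qb) v; zeroing the gradient yields the 3x3 system solved above with
# Cramer's rule, and the stabilizer keeps A well conditioned in near-flat regions.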


@triton.jit
def validate_faces_kernel(
    verts_ptr, faces_ptr, va_ptr, vb_ptr, opt_ptr, pair_edge_ptr, pair_face_ptr,
    n_pairs, area_thresh, bad_mask_ptr,
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offs < n_pairs

    ei = tl.load(pair_edge_ptr + offs, mask=mask, other=0).to(tl.int64)
    fi = tl.load(pair_face_ptr + offs, mask=mask, other=0).to(tl.int64)

    f0 = tl.load(faces_ptr + fi * 3 + 0, mask=mask, other=0).to(tl.int64)
    f1 = tl.load(faces_ptr + fi * 3 + 1, mask=mask, other=0).to(tl.int64)
    f2 = tl.load(faces_ptr + fi * 3 + 2, mask=mask, other=0).to(tl.int64)

    vai = tl.load(va_ptr + ei, mask=mask, other=0).to(tl.int64)
    vbi = tl.load(vb_ptr + ei, mask=mask, other=0).to(tl.int64)

    optx = tl.load(opt_ptr + ei * 3 + 0, mask=mask, other=0.0)
    opty = tl.load(opt_ptr + ei * 3 + 1, mask=mask, other=0.0)
    optz = tl.load(opt_ptr + ei * 3 + 2, mask=mask, other=0.0)

    v0x = tl.load(verts_ptr + f0 * 3 + 0, mask=mask, other=0.0)
    v0y = tl.load(verts_ptr + f0 * 3 + 1, mask=mask, other=0.0)
    v0z = tl.load(verts_ptr + f0 * 3 + 2, mask=mask, other=0.0)
    v1x = tl.load(verts_ptr + f1 * 3 + 0, mask=mask, other=0.0)
    v1y = tl.load(verts_ptr + f1 * 3 + 1, mask=mask, other=0.0)
    v1z = tl.load(verts_ptr + f1 * 3 + 2, mask=mask, other=0.0)
    v2x = tl.load(verts_ptr + f2 * 3 + 0, mask=mask, other=0.0)
    v2y = tl.load(verts_ptr + f2 * 3 + 1, mask=mask, other=0.0)
    v2z = tl.load(verts_ptr + f2 * 3 + 2, mask=mask, other=0.0)

    # Substitute the collapse target for whichever corners touch the edge
    is_v0_a = (f0 == vai) | (f0 == vbi)
    is_v1_a = (f1 == vai) | (f1 == vbi)
    is_v2_a = (f2 == vai) | (f2 == vbi)

    n0x = tl.where(is_v0_a, optx, v0x)
    n0y = tl.where(is_v0_a, opty, v0y)
    n0z = tl.where(is_v0_a, optz, v0z)
    n1x = tl.where(is_v1_a, optx, v1x)
    n1y = tl.where(is_v1_a, opty, v1y)
    n1z = tl.where(is_v1_a, optz, v1z)
    n2x = tl.where(is_v2_a, optx, v2x)
    n2y = tl.where(is_v2_a, opty, v2y)
    n2z = tl.where(is_v2_a, optz, v2z)

    # Old face normal (cross product of two edges)
    e1x_old = v1x - v0x
    e1y_old = v1y - v0y
    e1z_old = v1z - v0z
    e2x_old = v2x - v0x
    e2y_old = v2y - v0y
    e2z_old = v2z - v0z

    nx_old = e1y_old * e2z_old - e1z_old * e2y_old
    ny_old = e1z_old * e2x_old - e1x_old * e2z_old
    nz_old = e1x_old * e2y_old - e1y_old * e2x_old
    area_old_sq = nx_old * nx_old + ny_old * ny_old + nz_old * nz_old
    area_old = tl.sqrt(area_old_sq)

    # New face normal after the collapse
    e1x_new = n1x - n0x
    e1y_new = n1y - n0y
    e1z_new = n1z - n0z
    e2x_new = n2x - n0x
    e2y_new = n2y - n0y
    e2z_new = n2z - n0z

    nx_new = e1y_new * e2z_new - e1z_new * e2y_new
    ny_new = e1z_new * e2x_new - e1x_new * e2z_new
    nz_new = e1x_new * e2y_new - e1y_new * e2x_new
    area_new_sq = nx_new * nx_new + ny_new * ny_new + nz_new * nz_new
    area_new = tl.sqrt(area_new_sq)

    # Reject collapses that degenerate the face or flip its normal
    area_bad = area_new_sq < area_thresh * area_thresh
    dot = nx_old * nx_new + ny_old * ny_new + nz_old * nz_new
    flip_bad = dot < -0.2 * area_old * area_new

    # Reject collapses that stretch any face edge by more than 2.5x (6.25x squared)
    e0x_new = n1x - n0x
    e0y_new = n1y - n0y
    e0z_new = n1z - n0z
    e1x_new2 = n2x - n1x
    e1y_new2 = n2y - n1y
    e1z_new2 = n2z - n1z
    e2x_new2 = n0x - n2x
    e2y_new2 = n0y - n2y
    e2z_new2 = n0z - n2z

    l0_new_sq = e0x_new * e0x_new + e0y_new * e0y_new + e0z_new * e0z_new
    l1_new_sq = e1x_new2 * e1x_new2 + e1y_new2 * e1y_new2 + e1z_new2 * e1z_new2
    l2_new_sq = e2x_new2 * e2x_new2 + e2y_new2 * e2y_new2 + e2z_new2 * e2z_new2
    max_new_sq = tl.maximum(tl.maximum(l0_new_sq, l1_new_sq), l2_new_sq)

    e0x_old = v1x - v0x
    e0y_old = v1y - v0y
    e0z_old = v1z - v0z
    e1x_old2 = v2x - v1x
    e1y_old2 = v2y - v1y
    e1z_old2 = v2z - v1z
    e2x_old2 = v0x - v2x
    e2y_old2 = v0y - v2y
    e2z_old2 = v0z - v2z

    l0_old_sq = e0x_old * e0x_old + e0y_old * e0y_old + e0z_old * e0z_old
    l1_old_sq = e1x_old2 * e1x_old2 + e1y_old2 * e1y_old2 + e1z_old2 * e1z_old2
    l2_old_sq = e2x_old2 * e2x_old2 + e2y_old2 * e2y_old2 + e2z_old2 * e2z_old2
    max_old_sq = tl.maximum(tl.maximum(l0_old_sq, l1_old_sq), l2_old_sq)

    stretch_bad = max_new_sq > 6.25 * max_old_sq

    # Note: this buffer stores "bad" flags; the host inverts it into a keep mask.
    any_bad = area_bad | flip_bad | stretch_bad
    tl.store(bad_mask_ptr + offs, any_bad.to(tl.int32), mask=mask)
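
# These tests mirror _pytorch_validate_faces below: a collapse is rejected if
# any incident face would become degenerate (area_bad), flip its normal
# (flip_bad), or stretch an edge by more than 2.5x (stretch_bad). The PyTorch
# fallback additionally rejects faces with angles below 1 or above 178 degrees.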


def _pytorch_edge_errors(verts, Q, edges, stabilizer, max_edge_length_sq, mesh_scale_sq):
    n_edges = edges.shape[0]
    if n_edges == 0:
        return (torch.empty((0, 3), dtype=torch.float64, device=verts.device),
                torch.empty((0,), dtype=torch.float64, device=verts.device),
                torch.zeros((0,), dtype=torch.bool, device=verts.device))

    device = verts.device
    mesh_scale = mesh_scale_sq ** 0.5

    va = edges[:, 0]
    vb = edges[:, 1]
    Q0 = Q[va]
    Q1 = Q[vb]
    Qe = Q0 + Q1

    # Regularized 3x3 system, same as the Triton kernel
    A = Qe[:, :3, :3] + torch.eye(3, device=device, dtype=torch.float64).unsqueeze(0) * stabilizer
    b = -Qe[:, :3, 3].unsqueeze(-1)

    dets = torch.det(A)
    good = dets.abs() > 1e-12
    opt = torch.zeros((n_edges, 3), dtype=torch.float64, device=device)

    if good.any():
        try:
            sol = torch.linalg.solve(A[good], b[good])
            opt[good] = sol.squeeze(-1)
        except Exception:
            good = torch.zeros_like(good)

    if (~good).any():
        bad_idx = torch.nonzero(~good, as_tuple=True)[0]
        opt[bad_idx] = (verts[va[bad_idx]] + verts[vb[bad_idx]]) * 0.5

    pa = verts[va]
    pb = verts[vb]
    el = torch.norm(pb - pa, dim=-1)
    dist_a = torch.norm(opt - pa, dim=-1)
    dist_b = torch.norm(opt - pb, dim=-1)
    wander_bad = (dist_a > 4.0 * el) | (dist_b > 4.0 * el)

    if wander_bad.any():
        bad_idx = torch.nonzero(wander_bad, as_tuple=True)[0]
        opt[bad_idx] = (verts[va[bad_idx]] + verts[vb[bad_idx]]) * 0.5

    v4 = torch.cat([opt, torch.ones((n_edges, 1), device=device, dtype=torch.float64)], dim=1)
    err = torch.abs(torch.einsum("ei,eij,ej->e", v4, Qe, v4))

    length_ok = el > mesh_scale * 1e-5
    error_ok = err < max_edge_length_sq
    nan_ok = ~torch.isnan(opt).any(dim=-1) & ~torch.isnan(err)
    valid = length_ok & error_ok & nan_ok

    return opt, err, valid


def _triton_edge_errors(verts, Q, edges, stabilizer, max_edge_length_sq, mesh_scale_sq):
    n_edges = edges.shape[0]
    if n_edges == 0:
        return (torch.empty((0, 3), dtype=torch.float64, device=verts.device),
                torch.empty((0,), dtype=torch.float64, device=verts.device),
                torch.zeros((0,), dtype=torch.bool, device=verts.device))

    device = verts.device

    optimal = torch.empty((n_edges, 3), dtype=torch.float64, device=device)
    error = torch.empty((n_edges,), dtype=torch.float64, device=device)
    wander = torch.empty((n_edges,), dtype=torch.int32, device=device)

    BLOCK_SIZE = 256
    grid = (triton.cdiv(n_edges, BLOCK_SIZE),)

    try:
        qem_edge_errors_kernel[grid](
            verts, Q, edges, optimal, error, wander,
            n_edges, stabilizer, max_edge_length_sq, mesh_scale_sq,
            BLOCK_SIZE=BLOCK_SIZE
        )
    except Exception:
        # Fall back to the PyTorch path if the kernel fails to compile or launch
        return _pytorch_edge_errors(verts, Q, edges, stabilizer, max_edge_length_sq, mesh_scale_sq)

    has_nan = torch.isnan(optimal).any() or torch.isnan(error).any()
    has_inf = torch.isinf(error).any()

    if has_nan or has_inf:
        return _pytorch_edge_errors(verts, Q, edges, stabilizer, max_edge_length_sq, mesh_scale_sq)

    pa = verts[edges[:, 0]]
    pb = verts[edges[:, 1]]
    el = torch.norm(pb - pa, dim=-1)
    mesh_scale = mesh_scale_sq ** 0.5

    length_ok = el > mesh_scale * 1e-5
    error_ok = error < max_edge_length_sq
    nan_ok = ~torch.isnan(optimal).any(dim=-1) & ~torch.isnan(error)
    valid = length_ok & error_ok & nan_ok

    return optimal, error, valid


def _pytorch_validate_faces(verts, faces, v_a, v_b, opt_pos, pair_edge_idx, pair_face_idx, area_thresh):
    n_pairs = len(pair_edge_idx)
    if n_pairs == 0:
        return torch.ones(v_a.numel(), dtype=torch.bool, device=verts.device)

    device = verts.device

    old_faces = faces[pair_face_idx]
    v0_old = verts[old_faces[:, 0]]
    v1_old = verts[old_faces[:, 1]]
    v2_old = verts[old_faces[:, 2]]

    v0_new = v0_old.clone()
    v1_new = v1_old.clone()
    v2_new = v2_old.clone()

    va_t = v_a[pair_edge_idx]
    vb_t = v_b[pair_edge_idx]
    opt_t = opt_pos[pair_edge_idx]

    # Substitute the collapse target for corners that touch the edge
    mask0 = (old_faces[:, 0] == va_t) | (old_faces[:, 0] == vb_t)
    mask1 = (old_faces[:, 1] == va_t) | (old_faces[:, 1] == vb_t)
    mask2 = (old_faces[:, 2] == va_t) | (old_faces[:, 2] == vb_t)

    v0_new[mask0] = opt_t[mask0]
    v1_new[mask1] = opt_t[mask1]
    v2_new[mask2] = opt_t[mask2]

    e1_old = v1_old - v0_old
    e2_old = v2_old - v0_old
    n_old = torch.cross(e1_old, e2_old, dim=-1)

    e1_new = v1_new - v0_new
    e2_new = v2_new - v0_new
    n_new = torch.cross(e1_new, e2_new, dim=-1)

    area_new = torch.norm(n_new, dim=-1)
    area_bad = area_new < area_thresh

    n_old_norm = n_old / (torch.norm(n_old, dim=-1, keepdim=True) + 1e-12)
    n_new_norm = n_new / (torch.norm(n_new, dim=-1, keepdim=True) + 1e-12)
    dots = (n_old_norm * n_new_norm).sum(dim=-1)
    flip_bad = dots < -0.2

    old_edges = torch.stack([
        torch.norm(v1_old - v0_old, dim=-1),
        torch.norm(v2_old - v1_old, dim=-1),
        torch.norm(v0_old - v2_old, dim=-1),
    ], dim=1).max(dim=1)[0]

    new_edges = torch.stack([
        torch.norm(v1_new - v0_new, dim=-1),
        torch.norm(v2_new - v1_new, dim=-1),
        torch.norm(v0_new - v2_new, dim=-1),
    ], dim=1).max(dim=1)[0]

    stretch_bad = new_edges > 2.5 * old_edges

    def face_angles(v0, v1, v2):
        # Interior angles (degrees) via the law of cosines
        e0 = v1 - v0
        e1 = v2 - v1
        e2 = v0 - v2
        l0 = torch.norm(e0, dim=-1)
        l1 = torch.norm(e1, dim=-1)
        l2 = torch.norm(e2, dim=-1)
        cos_a = (l1 * l1 + l2 * l2 - l0 * l0) / (2 * l1 * l2 + 1e-12)
        cos_b = (l0 * l0 + l2 * l2 - l1 * l1) / (2 * l0 * l2 + 1e-12)
        cos_c = (l0 * l0 + l1 * l1 - l2 * l2) / (2 * l0 * l1 + 1e-12)
        cos_all = torch.stack([cos_a, cos_b, cos_c], dim=-1)
        return torch.acos(torch.clamp(cos_all, -1, 1)) * 180 / np.pi

    new_angles = face_angles(v0_new, v1_new, v2_new)
    angle_bad = (new_angles < 1.0).any(dim=-1) | (new_angles > 178.0).any(dim=-1)

    any_bad = area_bad | flip_bad | stretch_bad | angle_bad

    keep_mask = torch.ones(v_a.numel(), dtype=torch.bool, device=device)
    if any_bad.any():
        bad_edges = pair_edge_idx[any_bad]
        keep_mask.scatter_(0, bad_edges, False)
    return keep_mask


def _triton_validate_faces(verts, faces, v_a, v_b, opt_pos, pair_edge_idx, pair_face_idx, area_thresh):
    n_pairs = len(pair_edge_idx)
    if n_pairs == 0:
        return torch.ones(v_a.numel(), dtype=torch.bool, device=verts.device)

    device = verts.device

    pair_bad = torch.empty(n_pairs, dtype=torch.int32, device=device)

    BLOCK_SIZE = 256
    grid = (triton.cdiv(n_pairs, BLOCK_SIZE),)

    try:
        validate_faces_kernel[grid](
            verts, faces, v_a, v_b, opt_pos, pair_edge_idx, pair_face_idx,
            n_pairs, area_thresh, pair_bad,
            BLOCK_SIZE=BLOCK_SIZE
        )
    except Exception:
        return _pytorch_validate_faces(verts, faces, v_a, v_b, opt_pos, pair_edge_idx, pair_face_idx, area_thresh)

    keep_mask = torch.ones(v_a.numel(), dtype=torch.bool, device=device)
    bad_mask = pair_bad.bool()
    if bad_mask.any():
        bad_edges = pair_edge_idx[bad_mask]
        keep_mask.scatter_(0, bad_edges, False)
    return keep_mask


def _build_quadrics(verts, faces):
    # Per-face plane quadrics K = area * p p^T, accumulated onto each corner vertex.
    v0 = verts[faces[:, 0]]
    v1 = verts[faces[:, 1]]
    v2 = verts[faces[:, 2]]
    e1 = v1 - v0
    e2 = v2 - v0
    n = torch.cross(e1, e2, dim=-1)
    area = torch.norm(n, dim=-1)
    mask = area > 1e-12
    n_norm = torch.zeros_like(n)
    n_norm[mask] = n[mask] / area[mask].unsqueeze(-1)
    d = -(n_norm * v0).sum(dim=-1, keepdim=True)
    p = torch.cat([n_norm, d], dim=-1)
    K = torch.einsum("fi,fj->fij", p, p)
    K = K * area[:, None, None]
    V = verts.shape[0]
    Q = torch.zeros((V, 4, 4), dtype=torch.float64, device=verts.device)
    K_flat = K.reshape(-1, 16)
    Q_flat = Q.reshape(V, 16)
    for corner in range(3):
        idx = faces[:, corner].unsqueeze(1).expand(-1, 16)
        Q_flat.scatter_add_(0, idx, K_flat)
    return Q_flat.reshape(V, 4, 4)
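
# Background: each face with unit normal n and plane offset d defines
# p = (n, d) and a quadric K_f = area_f * p p^T; a vertex quadric is the sum of
# K_f over its incident faces, so v^T Q v measures the area-weighted squared
# distance of v = (x, y, z, 1) to those planes.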


def _cleanup_mesh(verts, faces, min_angle_deg=0.5, max_aspect=100.0):
    # Drop degenerate/sliver faces, then compact the vertex array.
    if faces.numel() == 0:
        return verts, faces

    v0 = verts[faces[:, 0]]
    v1 = verts[faces[:, 1]]
    v2 = verts[faces[:, 2]]
    e0 = v1 - v0
    e1 = v2 - v1
    e2 = v0 - v2
    l0 = torch.norm(e0, dim=-1)
    l1 = torch.norm(e1, dim=-1)
    l2 = torch.norm(e2, dim=-1)
    n = torch.cross(e0, e2, dim=-1)
    area = torch.norm(n, dim=-1)

    max_edge = torch.max(torch.max(l0, l1), l2)
    aspect = max_edge * max_edge / (2.0 * area + 1e-12)

    cos_a = (l1 * l1 + l2 * l2 - l0 * l0) / (2 * l1 * l2 + 1e-12)
    cos_b = (l0 * l0 + l2 * l2 - l1 * l1) / (2 * l0 * l2 + 1e-12)
    cos_c = (l0 * l0 + l1 * l1 - l2 * l2) / (2 * l0 * l1 + 1e-12)
    cos_all = torch.stack([cos_a, cos_b, cos_c], dim=-1)
    angles = torch.acos(torch.clamp(cos_all, -1, 1)) * 180 / np.pi

    good = (aspect < max_aspect) & (angles.min(dim=1)[0] > min_angle_deg) & (area > 1e-12)
    faces = faces[good]

    if faces.numel() == 0:
        return verts, faces

    used = torch.zeros(verts.shape[0], dtype=torch.bool, device=verts.device)
    used[faces[:, 0]] = True
    used[faces[:, 1]] = True
    used[faces[:, 2]] = True

    remap = torch.full((verts.shape[0],), -1, dtype=torch.int64, device=verts.device)
    remap[used] = torch.arange(used.sum().item(), device=verts.device)
    verts = verts[used]
    faces = remap[faces]
    return verts, faces


def _build_vertex_face_csr(faces, num_verts):
    """Build a CSR-style vertex-to-face adjacency: face indices sorted by
    vertex, with ptrs[v]:ptrs[v + 1] spanning the faces incident to vertex v."""
    vf_verts = faces.view(-1)
    vf_faces = torch.arange(faces.shape[0], device=faces.device).repeat_interleave(3)

    sort_idx = torch.argsort(vf_verts, stable=True)
    sorted_verts = vf_verts[sort_idx]
    sorted_faces = vf_faces[sort_idx]

    unique_verts, counts = torch.unique_consecutive(sorted_verts, return_counts=True)

    ptrs = torch.zeros(num_verts + 1, dtype=torch.int64, device=faces.device)
    ptrs[unique_verts + 1] = counts
    ptrs = torch.cumsum(ptrs, dim=0)

    return sorted_faces, ptrs
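
# Example: faces [[0, 1, 2], [0, 2, 3]] over 4 vertices yield
# face_indices = [0, 1, 0, 0, 1, 1] and ptrs = [0, 2, 3, 5, 6], so
# _get_vertex_faces(2, ...) below returns faces [0, 1].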


def _get_vertex_faces(v, face_indices, vert_ptrs):
    start = vert_ptrs[v]
    end = vert_ptrs[v + 1]
    return face_indices[start:end]
def _gpu_greedy_sampled(edges, errors, v_alive, max_select):
    device = edges.device
    n_edges = edges.shape[0]

    if n_edges == 0:
        return torch.empty(0, dtype=torch.int64, device=device)

    # Sort by error
    sorted_idx = torch.argsort(errors)
    sorted_edges = edges[sorted_idx]

    # Sample K edges from the sorted list;
    # this gives us diverse edges spread across the mesh
    K = min(max_select * 20, n_edges)
    if K < n_edges:
        sample_positions = torch.linspace(0, n_edges - 1, K, device=device).long()
        sampled_edges = sorted_edges[sample_positions]
        sampled_idx = sorted_idx[sample_positions]
    else:
        sampled_edges = sorted_edges
        sampled_idx = sorted_idx

    # Greedy selection on GPU
    used = torch.zeros(v_alive.shape[0], dtype=torch.bool, device=device)
    used[~v_alive] = True

    selected = []
    batch_size = 8192

    for start in range(0, sampled_edges.shape[0], batch_size):
        end = min(start + batch_size, sampled_edges.shape[0])
        batch = sampled_edges[start:end]
        batch_idx = sampled_idx[start:end]

        va = batch[:, 0]
        vb = batch[:, 1]

        # Vectorized free check
        free = ~used[va] & ~used[vb]

        if not free.any():
            continue

        # Get free edges
        free_local = torch.nonzero(free, as_tuple=True)[0]
        free_edges = batch[free_local]
        free_idx = batch_idx[free_local]

        # Process the free edges greedily on CPU; only this small subset is transferred
        free_va = free_edges[:, 0].cpu().numpy()
        free_vb = free_edges[:, 1].cpu().numpy()
        free_edges_idx = free_idx.cpu().numpy()

        for i in range(len(free_va)):
            a = int(free_va[i])
            b = int(free_vb[i])
            if not used[a].item() and not used[b].item():
                selected.append(int(free_edges_idx[i]))
                used[a] = True
                used[b] = True
                if len(selected) >= max_select:
                    return torch.tensor(selected, dtype=torch.int64, device=device)

    if len(selected) == 0:
        return torch.empty(0, dtype=torch.int64, device=device)
    return torch.tensor(selected, dtype=torch.int64, device=device)
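
# The greedy pass selects an independent set of edges (no two sharing a
# vertex), so all collapses chosen in one iteration can be applied in parallel
# without conflicting writes.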


def _qem_simplify(verts_np, faces_np, colors_np, target_faces, device, max_edge_length=None, use_triton=False, fast_mode=False):
    verts = torch.from_numpy(verts_np).to(device=device, dtype=torch.float64)
    faces = torch.from_numpy(faces_np).to(device=device, dtype=torch.int64)
    colors = (
        torch.from_numpy(colors_np).to(device=device, dtype=torch.float64)
        if colors_np is not None
        else None
    )

    num_verts = verts.shape[0]
    num_faces = faces.shape[0]

    logging.debug(f"[QEM] Input: {num_verts} verts, {num_faces} faces, target={target_faces}, fast={fast_mode}")

    v_alive = torch.ones(num_verts, dtype=torch.bool, device=device)
    f_alive = torch.ones(num_faces, dtype=torch.bool, device=device)

    Q = _build_quadrics(verts, faces)

    bbox = verts.max(dim=0)[0] - verts.min(dim=0)[0]
    mesh_scale = torch.norm(bbox).item()

    if max_edge_length is None or max_edge_length <= 0:
        max_edge_length = mesh_scale * 2.0

    if max_edge_length < 1e-6:
        max_edge_length = 1.0

    stabilizer = mesh_scale * mesh_scale * 0.001
    area_thresh = mesh_scale * mesh_scale * 1e-10
    max_edge_length_sq = max_edge_length * max_edge_length
    mesh_scale_sq = mesh_scale * mesh_scale

    iteration = 0
    total_collapses = 0
    last_faces = num_faces

    while True:
        n_faces = int(f_alive.sum().item())

        if n_faces <= target_faces:
            break

        alive_v = torch.nonzero(v_alive, as_tuple=True)[0]
        alive_f = torch.nonzero(f_alive, as_tuple=True)[0]

        if alive_v.numel() <= 4 or alive_f.numel() == 0:
            break

        # Compact active mesh
        vmap = torch.full((num_verts,), -1, dtype=torch.int64, device=device)
        vmap[alive_v] = torch.arange(alive_v.numel(), device=device)

        active_faces = faces[alive_f]
        remapped = vmap[active_faces]

        # Extract edges
        e0 = remapped[:, [0, 1]]
        e1 = remapped[:, [1, 2]]
        e2 = remapped[:, [2, 0]]
        edges = torch.cat([e0, e1, e2], dim=0)
        edges = torch.sort(edges, dim=1)[0]
        edges = edges[(edges >= 0).all(dim=1)]
        edges = edges[edges[:, 0] != edges[:, 1]]

        if edges.shape[0] == 0:
            break

        edges_orig = alive_v[edges]

        # Filter by edge length
        pa = verts[edges_orig[:, 0]]
        pb = verts[edges_orig[:, 1]]
        el = torch.norm(pb - pa, dim=-1)
        short_enough = el < max_edge_length

        if not short_enough.any():
            max_edge_length = el.max().item() * 2.0
            max_edge_length_sq = max_edge_length * max_edge_length
            short_enough = el < max_edge_length
            if not short_enough.any():
                break

        edges_orig = edges_orig[short_enough]
        if edges_orig.shape[0] == 0:
            break

        # Sample edges for processing
        n_edges_total = edges_orig.shape[0]
        max_edges_to_process = 10_000_000  # 10M edges per iteration

        if n_edges_total > max_edges_to_process:
            perm = torch.randperm(n_edges_total, device=device)[:max_edges_to_process]
            edges_orig = edges_orig[perm]
            n_edges = max_edges_to_process
        else:
            n_edges = n_edges_total

        # Compute edge errors
        if use_triton and torch.cuda.is_available():
            optimal, err, valid = _triton_edge_errors(
                verts, Q, edges_orig, stabilizer, max_edge_length_sq, mesh_scale_sq
            )
        else:
            optimal, err, valid = _pytorch_edge_errors(
                verts, Q, edges_orig, stabilizer, max_edge_length_sq, mesh_scale_sq
            )

        if not valid.any():
            valid = torch.ones(n_edges, dtype=torch.bool, device=device)

        valid_idx = torch.nonzero(valid, as_tuple=True)[0]
        edges_orig = edges_orig[valid_idx]
        optimal = optimal[valid_idx]
        err = err[valid_idx]

        # KEY: collapse in much larger batches than classic one-at-a-time QEM
        faces_to_remove = n_faces - target_faces
        max_collapses = min(50000, max(1000, faces_to_remove // 20))

        sel = _gpu_greedy_sampled(edges_orig, err, v_alive, max_collapses)

        if sel.numel() == 0:
            break

        v_a = edges_orig[sel, 0]
        v_b = edges_orig[sel, 1]
        opt_pos = optimal[sel]

# Build adjacency
|
|
face_indices, vert_ptrs = _build_vertex_face_csr(active_faces, num_verts)
|
|
|
|
# Build (edge, face) pairs
|
|
pair_edge_idx = []
|
|
pair_face_idx = []
|
|
|
|
va_cpu = v_a.cpu()
|
|
vb_cpu = v_b.cpu()
|
|
|
|
for ei, (vai, vbi) in enumerate(zip(va_cpu, vb_cpu)):
|
|
f_va = _get_vertex_faces(vai.item(), face_indices, vert_ptrs)
|
|
f_vb = _get_vertex_faces(vbi.item(), face_indices, vert_ptrs)
|
|
|
|
faces_vb = active_faces[f_vb]
|
|
mask_b = (faces_vb[:, 0] != vai) & (faces_vb[:, 1] != vai) & (faces_vb[:, 2] != vai)
|
|
f_vb_valid = f_vb[mask_b]
|
|
|
|
faces_va = active_faces[f_va]
|
|
mask_a = (faces_va[:, 0] != vbi) & (faces_va[:, 1] != vbi) & (faces_va[:, 2] != vbi)
|
|
f_va_valid = f_va[mask_a]
|
|
|
|
all_faces = torch.cat([f_vb_valid, f_va_valid])
|
|
if all_faces.numel() > 0:
|
|
pair_edge_idx.extend([ei] * all_faces.numel())
|
|
pair_face_idx.extend(all_faces.cpu().tolist())
|
|
|
|
keep_mask = torch.ones(v_a.numel(), dtype=torch.bool, device=device)
|
|
|
|
# Face validation (skip in fast_mode)
|
|
if not fast_mode and len(pair_edge_idx) > 0:
|
|
pair_edge_idx_t = torch.tensor(pair_edge_idx, dtype=torch.int64, device=device)
|
|
pair_face_idx_t = torch.tensor(pair_face_idx, dtype=torch.int64, device=device)
|
|
|
|
if use_triton and torch.cuda.is_available():
|
|
keep_mask = _triton_validate_faces(
|
|
verts, active_faces, v_a, v_b, opt_pos,
|
|
pair_edge_idx_t, pair_face_idx_t, area_thresh
|
|
)
|
|
else:
|
|
keep_mask = _pytorch_validate_faces(
|
|
verts, active_faces, v_a, v_b, opt_pos,
|
|
pair_edge_idx_t, pair_face_idx_t, area_thresh
|
|
)
|
|
|
|
# Link condition (skip in fast_mode for massive speedup)
|
|
if not fast_mode:
|
|
# Vectorized link condition using GPU operations
|
|
link_keep = torch.ones(v_a.numel(), dtype=torch.bool, device=device)
|
|
|
|
# Build neighbor sets for va and vb using GPU operations
|
|
for ei in range(v_a.numel()):
|
|
vai = v_a[ei].item()
|
|
vbi = v_b[ei].item()
|
|
|
|
f_va = _get_vertex_faces(vai, face_indices, vert_ptrs)
|
|
f_vb = _get_vertex_faces(vbi, face_indices, vert_ptrs)
|
|
|
|
if f_va.numel() == 0 or f_vb.numel() == 0:
|
|
continue
|
|
|
|
faces_va = active_faces[f_va]
|
|
verts_va = faces_va[faces_va != vai]
|
|
verts_va = verts_va[verts_va != vbi]
|
|
|
|
faces_vb = active_faces[f_vb]
|
|
verts_vb = faces_vb[faces_vb != vbi]
|
|
verts_vb = verts_vb[verts_vb != vai]
|
|
|
|
if verts_va.numel() == 0 or verts_vb.numel() == 0:
|
|
continue
|
|
|
|
# Use torch.intersect1d for GPU-native intersection
|
|
common = torch.intersect1d(verts_va, verts_vb)
|
|
if common.numel() > 2:
|
|
link_keep[ei] = False
|
|
|
|
keep_mask &= link_keep

        if not keep_mask.any():
            break

        keep_idx = torch.nonzero(keep_mask, as_tuple=True)[0]
        v_a = v_a[keep_idx]
        v_b = v_b[keep_idx]
        sel = sel[keep_idx]

        # Apply collapses
        verts[v_a] = optimal[sel]
        v_alive[v_b] = False
        Q[v_a] += Q[v_b]

        if colors is not None:
            colors[v_a] = (colors[v_a] + colors[v_b]) * 0.5

        merge_map = torch.arange(num_verts, device=device)
        merge_map[v_b] = v_a
        faces = merge_map[faces]
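        # e.g. with num_verts = 4 and one collapse (v_a = 0, v_b = 2),
        # merge_map becomes [0, 1, 0, 3]; gathering faces through it rewrites
        # every reference to vertex 2 as vertex 0 in a single indexing op.
        # This stays consistent across the batch assuming _gpu_greedy_sampled
        # selects vertex-disjoint edges.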

        bad = (
            (faces[:, 0] == faces[:, 1])
            | (faces[:, 1] == faces[:, 2])
            | (faces[:, 2] == faces[:, 0])
        )
        f_alive &= ~bad

        total_collapses += v_a.numel()
        iteration += 1

        # Log only every 50 iterations to reduce sync overhead
        if iteration % 50 == 0 or n_faces < last_faces * 0.9:
            logging.debug(f"[QEM] Iter {iteration}: {total_collapses} collapses, {int(f_alive.sum().item())} faces, applied {v_a.numel()}")
            last_faces = n_faces

        # Periodic compaction
        if iteration % 5 == 0 and int(f_alive.sum().item()) < num_faces * 0.5:
            faces = faces[f_alive]
            f_alive = torch.ones(faces.shape[0], dtype=torch.bool, device=device)
            num_faces = faces.shape[0]

        if iteration > 5000:
            break

    # Finalize
    final_v = verts[v_alive]
    final_c = colors[v_alive] if colors is not None else None

    remap = torch.full((num_verts,), -1, dtype=torch.int64, device=device)
    remap[v_alive] = torch.arange(int(v_alive.sum().item()), device=device)

    final_f_raw = faces[f_alive]
    alive_mask = v_alive[final_f_raw].all(dim=1)
    final_f_raw = final_f_raw[alive_mask]
    final_f = remap[final_f_raw]
    valid_faces = (final_f >= 0).all(dim=1)
    final_f = final_f[valid_faces]

    if final_f.numel() > 0:
        final_f = torch.unique(torch.sort(final_f, dim=1)[0], dim=0)

    final_v, final_f = _cleanup_mesh(final_v, final_f, min_angle_deg=0.5, max_aspect=100.0)

    return final_v, final_f, final_c
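

# Illustrative sketch (not called by the nodes in this file): one standard way
# to build the per-vertex quadrics `Q` that the collapse loop above
# accumulates. In Garland-Heckbert QEM each face contributes the outer product
# of its plane coefficients p = [n, d] (with n . x + d = 0) to its three
# vertices, and the error of collapsing edge (a, b) to position v is
# v^T (Q_a + Q_b) v, which matches the `Q[v_a] += Q[v_b]` update above.
# `_example_vertex_quadrics` is a hypothetical helper name.
def _example_vertex_quadrics(verts, faces):
    e1 = verts[faces[:, 1]] - verts[faces[:, 0]]
    e2 = verts[faces[:, 2]] - verts[faces[:, 0]]
    n = torch.nn.functional.normalize(torch.linalg.cross(e1, e2), dim=1)
    d = -(n * verts[faces[:, 0]]).sum(dim=1, keepdim=True)
    p = torch.cat([n, d], dim=1)              # (F, 4) plane coefficients
    face_Q = p.unsqueeze(2) * p.unsqueeze(1)  # (F, 4, 4) outer products
    Q = verts.new_zeros((verts.shape[0], 4, 4))
    for k in range(3):                        # scatter onto each face corner
        Q.index_add_(0, faces[:, k], face_Q)
    return Q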


def simplify_fn(vertices, faces, colors=None, target=100000, max_edge_length=None, use_triton=True, fast_mode=True):
    if vertices.ndim == 3:
        # Uniform (B, V, 3) batch: simplify each item, then restack. Note
        # this assumes every simplified item ends up with identical
        # vertex/face counts; jagged results would need padding instead.
        v_list, f_list, c_list = [], [], []
        for i in range(vertices.shape[0]):
            c_in = colors[i] if colors is not None else None
            v_i, f_i, c_i = simplify_fn(vertices[i], faces[i], c_in, target, max_edge_length, use_triton, fast_mode)
            v_list.append(v_i)
            f_list.append(f_i)
            if c_i is not None:
                c_list.append(c_i)
        c_out = torch.stack(c_list) if len(c_list) > 0 else None
        return torch.stack(v_list), torch.stack(f_list), c_out

    if faces.shape[0] <= target:
        return vertices, faces, colors

    device = vertices.device
    dtype = vertices.dtype

    verts_np = vertices.detach().cpu().numpy().astype(np.float64)
    faces_np = faces.detach().cpu().numpy().astype(np.int64)
    colors_np = (
        colors.detach().cpu().numpy().astype(np.float64)
        if colors is not None
        else None
    )

    out_v, out_f, out_c = _qem_simplify(
        verts_np, faces_np, colors_np, target, device, max_edge_length, use_triton, fast_mode
    )

    final_v = out_v.to(device=device, dtype=dtype)
    final_f = out_f.to(device=device, dtype=faces.dtype)
    final_c = (
        out_c.to(device=device, dtype=colors.dtype)
        if out_c is not None
        else None
    )
    return final_v, final_f, final_c
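

# Minimal usage sketch for simplify_fn (illustrative only; the tetrahedron
# below is already at the target, so it is returned unchanged and merely
# demonstrates the expected shapes and dtypes). `_example_simplify_usage` is
# a hypothetical helper, not used by the nodes.
def _example_simplify_usage():
    verts = torch.tensor([
        [0.0, 0.0, 0.0], [1.0, 0.0, 0.0],
        [0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
    ])
    faces = torch.tensor([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])
    v, f, _ = simplify_fn(verts, faces, target=100000)
    assert f.shape == faces.shape
    return v, f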


def fill_holes_fn(vertices, faces, max_perimeter=0.03):
    is_batched = vertices.ndim == 3
    if is_batched:
        v_list, f_list = [], []
        for i in range(vertices.shape[0]):
            v_i, f_i = fill_holes_fn(vertices[i], faces[i], max_perimeter)
            v_list.append(v_i)
            f_list.append(f_i)
        return torch.stack(v_list), torch.stack(f_list)

    device = vertices.device
    v = vertices
    f = faces

    if f.numel() == 0:
        return v, f

    edges = torch.cat([f[:, [0, 1]], f[:, [1, 2]], f[:, [2, 0]]], dim=0)
    edges_sorted, _ = torch.sort(edges, dim=1)

    max_v = v.shape[0]
    packed_undirected = edges_sorted[:, 0].long() * max_v + edges_sorted[:, 1].long()

    unique_packed, counts = torch.unique(packed_undirected, return_counts=True)
    boundary_packed = unique_packed[counts == 1]

    if boundary_packed.numel() == 0:
        return v, f

    is_boundary = torch.isin(packed_undirected, boundary_packed)
    b_edges = edges[is_boundary]

    # Trace closed loops by following the directed boundary edges. Dict
    # overwrites mean a non-manifold junction keeps only one outgoing edge.
    adj = {u.item(): v_idx.item() for u, v_idx in b_edges}

    loops = []
    visited = set()

    for start_node in adj.keys():
        if start_node in visited:
            continue

        curr = start_node
        loop = []

        while curr not in visited:
            visited.add(curr)
            loop.append(curr)
            curr = adj.get(curr, -1)

            if curr == -1:
                loop = []
                break
            if curr == start_node:
                loops.append(loop)
                break

    new_verts = []
    new_faces = []
    v_idx = v.shape[0]

    for loop in loops:
        loop_t = torch.tensor(loop, device=device, dtype=torch.long)
        loop_v = v[loop_t]

        diffs = loop_v - torch.roll(loop_v, shifts=-1, dims=0)
        perimeter = torch.norm(diffs, dim=1).sum().item()

        if perimeter <= max_perimeter:
            new_verts.append(loop_v.mean(dim=0))

            for i in range(len(loop)):
                new_faces.append([loop[(i + 1) % len(loop)], loop[i], v_idx])
            v_idx += 1
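            # Fan triangulation: a loop [a, b, c, ...] is closed with a
            # single centroid vertex m and triangles (b, a, m), (c, b, m),
            # ...; the reversed pair order flips the winding relative to the
            # loop direction so the patch faces the same way as the
            # surrounding surface.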

    if new_verts:
        v = torch.cat([v, torch.stack(new_verts)], dim=0)
        f = torch.cat([f, torch.tensor(new_faces, device=device, dtype=torch.long)], dim=0)

    return v, f
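

# Illustrative sketch of the boundary test fill_holes_fn relies on: an edge
# lies on a boundary iff it belongs to exactly one face. For two triangles
# sharing a diagonal ([[0, 1, 2], [0, 2, 3]]), the four outer edges appear
# once while (0, 2) appears twice and is dropped. `_example_boundary_edges`
# is a hypothetical helper, not used by the nodes.
def _example_boundary_edges(faces, num_verts):
    edges = torch.cat([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], dim=0)
    edges, _ = torch.sort(edges, dim=1)
    packed = edges[:, 0].long() * num_verts + edges[:, 1].long()
    unique_packed, counts = torch.unique(packed, return_counts=True)
    boundary = unique_packed[counts == 1]
    return torch.stack([boundary // num_verts, boundary % num_verts], dim=1)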


def make_double_sided(vertices, faces):
    is_batched = vertices.ndim == 3
    if is_batched:
        f_list = []
        for i in range(faces.shape[0]):
            f_inv = faces[i][:, [0, 2, 1]]
            f_list.append(torch.cat([faces[i], f_inv], dim=0))
        return vertices, torch.stack(f_list)

    faces_inv = faces[:, [0, 2, 1]]
    return vertices, torch.cat([faces, faces_inv], dim=0)


class PostProcessMesh(IO.ComfyNode):
    @classmethod
    def define_schema(cls):
        return IO.Schema(
            node_id="PostProcessMesh",
            category="latent/3d",
            inputs=[
                IO.Mesh.Input("mesh"),
                IO.Int.Input("simplify", default=1_000_000, min=0, max=50_000_000),
                IO.Float.Input("fill_holes_perimeter", default=0.03, min=0.0, step=0.0001),
            ],
            outputs=[
                IO.Mesh.Output("output_mesh"),
            ]
        )

    @classmethod
    def execute(cls, mesh, simplify, fill_holes_perimeter):
        # Input should be a comfy NestedTensor; deep-copy so the upstream
        # mesh is not mutated in place
        mesh = copy.deepcopy(mesh)

        def process_single(v, f, c):
            if fill_holes_perimeter > 0:
                v, f = fill_holes_fn(v, f, max_perimeter=fill_holes_perimeter)

            if simplify > 0 and f.shape[0] > simplify:
                v, f, c = simplify_fn(v, f, colors=c, target=simplify)

            v, f = make_double_sided(v, f)
            return v, f, c

        # Check whether the batch is jagged (list) or uniform (3D tensor)
        is_list = isinstance(mesh.vertices, list)
        is_batched_tensor = not is_list and mesh.vertices.ndim == 3
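        # e.g. a uniform batch is a single (B, V, 3) tensor while a jagged
        # batch is a list of B (V_i, 3) tensors; simplification usually
        # produces differing V_i, which is why the restack below is guarded.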

        if is_list or is_batched_tensor:
            out_v, out_f, out_c = [], [], []
            bsz = len(mesh.vertices) if is_list else mesh.vertices.shape[0]

            for i in range(bsz):
                v_i = mesh.vertices[i]
                f_i = mesh.faces[i]

                # Grab per-item colors if they exist
                c_i = None
                if hasattr(mesh, 'colors') and mesh.colors is not None:
                    c_i = mesh.colors[i] if (isinstance(mesh.colors, list) or mesh.colors.ndim == 3) else mesh.colors

                v_i, f_i, c_i = process_single(v_i, f_i, c_i)

                out_v.append(v_i)
                out_f.append(f_i)
                if c_i is not None:
                    out_c.append(c_i)

            # If every output mesh has the same shape, stack into a uniform
            # batch; otherwise keep the jagged list (ComfyUI's native format)
            if all(v.shape == out_v[0].shape for v in out_v) and all(f.shape == out_f[0].shape for f in out_f):
                mesh.vertices = torch.stack(out_v)
                mesh.faces = torch.stack(out_f)
                if out_c:
                    mesh.colors = torch.stack(out_c)
            else:
                mesh.vertices = out_v
                mesh.faces = out_f
                if out_c:
                    mesh.colors = out_c

        else:
            # Single unbatched mesh with (V, 3) vertices
            c = mesh.colors if hasattr(mesh, 'colors') and mesh.colors is not None else None
            v, f, c = process_single(mesh.vertices, mesh.faces, c)
            mesh.vertices = v
            mesh.faces = f
            if c is not None:
                mesh.colors = c

        return IO.NodeOutput(mesh)


class Trellis2Extension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[IO.ComfyNode]]:
        return [
            Trellis2Conditioning,
            EmptyShapeLatentTrellis2,
            EmptyStructureLatentTrellis2,
            EmptyTextureLatentTrellis2,
            VaeDecodeTrellis2Texture,
            VaeDecodeTrellis2Shape,
            VaeDecodeTrellis2Structure,
            Trellis2UpsampleCascade,
            PostProcessMesh,
        ]


async def comfy_entrypoint() -> Trellis2Extension:
    return Trellis2Extension()