From c81ddf23498d27f82293272494fd66f31dacb7fc Mon Sep 17 00:00:00 2001
From: John Pollock
Date: Mon, 20 Apr 2026 11:06:04 -0500
Subject: [PATCH 1/8] Fix Trellis2 batched shape and texture semantics

---
 comfy/ldm/trellis2/model.py    | 347 ++++++++++++++++++++++++++++++---
 comfy/sample.py                |  17 ++
 comfy_extras/nodes_trellis2.py | 332 +++++++++++++++++++++++++++----
 3 files changed, 635 insertions(+), 61 deletions(-)

diff --git a/comfy/ldm/trellis2/model.py b/comfy/ldm/trellis2/model.py
index 1c5d6c3ec..76dbacc93 100644
--- a/comfy/ldm/trellis2/model.py
+++ b/comfy/ldm/trellis2/model.py
@@ -786,6 +786,7 @@ class Trellis2(nn.Module):
         # 32 -> 512px path, 64 -> 1024px path.
         uses_1024_conditioning = self.img2shape.resolution == 64
         coords = transformer_options.get("coords", None)
+        coord_counts = transformer_options.get("coord_counts")
         mode = transformer_options.get("generation_mode", "structure_generation")
         is_512_run = False
         timestep = timestep.to(self.dtype)
@@ -811,40 +812,205 @@ class Trellis2(nn.Module):
             cond = context
         shape_rule = sigmas < self.guidance_interval[0] or sigmas > self.guidance_interval[1]
         txt_rule = sigmas < self.guidance_interval_txt[0] or sigmas > self.guidance_interval_txt[1]
+        dense_out = None

         if not_struct_mode:
             orig_bsz = x.shape[0]
             rule = txt_rule if mode == "texture_generation" else shape_rule
-            if rule and orig_bsz > 1:
-                x_eval = x[1].unsqueeze(0)
-                t_eval = timestep[1].unsqueeze(0) if timestep.shape[0] > 1 else timestep
+            logical_batch = coord_counts.shape[0] if coord_counts is not None else 1
+            if rule and orig_bsz > logical_batch:
+                half = orig_bsz // 2
+                x_eval = x[half:]
+                t_eval = timestep[half:] if timestep.shape[0] > 1 else timestep
                 c_eval = cond
             else:
                 x_eval = x
                 t_eval = timestep
                 c_eval = context

+            x_eval_norms = [float(v) for v in x_eval.square().sum(dim=(1, 2)).detach().cpu().tolist()]
+            c_eval_norms = [float(v) for v in c_eval.square().sum(dim=(1, 2)).detach().cpu().tolist()]
+            print(
+                "TRELLIS2_NOT_STRUCT_INPUT_TRACE",
+                {
+                    "mode": mode,
+                    "orig_bsz": int(orig_bsz),
+                    "logical_batch": int(logical_batch),
+                    "rule": bool(rule),
+                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
+                    "x_eval_norms": x_eval_norms,
+                    "c_eval_norms": c_eval_norms,
+                },
+            )
+
             B, N, C = x_eval.shape

             if mode in ["shape_generation", "texture_generation"]:
-                feats_flat = x_eval.reshape(-1, C)
+                if coord_counts is not None:
+                    logical_batch = coord_counts.shape[0]
+                    if B % logical_batch != 0:
+                        raise ValueError(
+                            f"Trellis2 coord_counts batch {logical_batch} doesn't divide latent batch {B}"
+                        )
+                    repeat_factor = B // logical_batch
+                    sparse_outs = []
+                    active_coord_counts = []
+                    if mode == "shape_generation" and repeat_factor > 1:
+                        grouped_outs = []
+                        grouped_counts = []
+                        for i in range(logical_batch):
+                            count = int(coord_counts[i].item())
+                            coords_i = coords[coords[:, 0] == i].clone()
+                            if coords_i.shape[0] != count:
+                                raise ValueError(
+                                    f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}"
+                                )

-                # inflate coords [N, 4] -> [B*N, 4]
-                coords_list = []
-                for i in range(B):
-                    c = coords.clone()
-                    c[:, 0] = i
-                    coords_list.append(c)
+                            feat_batches = []
+                            coord_batches = []
+                            index_batch = []
+                            for rep in range(repeat_factor):
+                                out_index = rep * logical_batch + i
+                                feat_batches.append(x_eval[out_index, :count])
+                                coords_rep = coords_i.clone()
+                                coords_rep[:, 0] = rep
+                                coord_batches.append(coords_rep)
+                                index_batch.append(out_index)

-                batched_coords = torch.cat(coords_list, dim=0)
+                            print(
+                                "TRELLIS2_GROUPED_INPUT_TRACE",
+                                {
+                                    "mode": mode,
+                                    "sample_index": int(i),
+                                    "coord_count": int(count),
+                                    "feat_norms": [float(v.square().sum().detach().cpu().item()) for v in feat_batches],
+                                },
+                            )
+
+                            x_st_i = SparseTensor(
+                                feats=torch.cat(feat_batches, dim=0),
+                                coords=torch.cat(coord_batches, dim=0).to(torch.int32),
+                            )
+                            index_tensor = torch.tensor(index_batch, device=x_eval.device, dtype=torch.long)
+                            if t_eval.shape[0] > 1:
+                                t_i = t_eval.index_select(0, index_tensor)
+                            else:
+                                t_i = t_eval
+                            if c_eval.shape[0] > 1:
+                                c_i = c_eval.index_select(0, index_tensor)
+                            else:
+                                c_i = c_eval
+
+                            if is_512_run:
+                                sparse_out = self.img2shape_512(x_st_i, t_i, c_i)
+                            else:
+                                sparse_out = self.img2shape(x_st_i, t_i, c_i)
+
+                            feats_group, coords_group = sparse_out.to_tensor_list()
+                            if len(feats_group) != repeat_factor:
+                                raise ValueError(
+                                    f"Trellis2 expected {repeat_factor} sparse output groups for batch {i}, got {len(feats_group)}"
+                                )
+                            for rep, (feats_rep, coords_rep) in enumerate(zip(feats_group, coords_group)):
+                                if feats_rep.shape[0] != count:
+                                    raise ValueError(
+                                        f"Trellis2 sparse output rows for batch {i} rep {rep} expected {count}, got {feats_rep.shape[0]}"
+                                    )
+                                if coords_rep.shape[0] != count:
+                                    raise ValueError(
+                                        f"Trellis2 sparse output coords for batch {i} rep {rep} expected {count}, got {coords_rep.shape[0]}"
+                                    )
+                            grouped_outs.append(feats_group)
+                            grouped_counts.append(count)
+
+                        for rep in range(repeat_factor):
+                            for i in range(logical_batch):
+                                sparse_outs.append(grouped_outs[i][rep])
+                                active_coord_counts.append(grouped_counts[i])
+                    else:
+                        for rep in range(repeat_factor):
+                            for i in range(logical_batch):
+                                out_index = rep * logical_batch + i
+                                count = int(coord_counts[i].item())
+                                coords_i = coords[coords[:, 0] == i].clone()
+                                if coords_i.shape[0] != count:
+                                    raise ValueError(
+                                        f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}"
+                                    )
+                                coords_i[:, 0] = 0
+                                feats_i = x_eval[out_index, :count]
+                                x_st_i = SparseTensor(feats=feats_i, coords=coords_i.to(torch.int32))
+                                t_i = t_eval[out_index].unsqueeze(0) if t_eval.shape[0] > 1 else t_eval
+                                c_i = c_eval[out_index].unsqueeze(0) if c_eval.shape[0] > 1 else c_eval
+
+                                if mode == "shape_generation":
+                                    if is_512_run:
+                                        sparse_out = self.img2shape_512(x_st_i, t_i, c_i)
+                                    else:
+                                        sparse_out = self.img2shape(x_st_i, t_i, c_i)
+                                else:
+                                    slat = transformer_options.get("shape_slat")
+                                    if slat is None:
+                                        raise ValueError("shape_slat can't be None")
+                                    if slat.ndim == 3:
+                                        if slat.shape[0] != logical_batch:
+                                            raise ValueError(
+                                                f"shape_slat batch {slat.shape[0]} doesn't match coord_counts batch {logical_batch}"
+                                            )
+                                        if slat.shape[1] < count:
+                                            raise ValueError(
+                                                f"shape_slat tokens {slat.shape[1]} can't cover coord count {count} for batch {i}"
+                                            )
+                                        slat_feats = slat[i, :count].to(x_st_i.device)
+                                    else:
+                                        slat_feats = slat[:count].to(x_st_i.device)
+                                    x_st_i = x_st_i.replace(feats=torch.cat([x_st_i.feats, slat_feats], dim=-1))
+                                    sparse_out = self.shape2txt(x_st_i, t_i, c_i)
+
+                                sparse_outs.append(sparse_out.feats)
+                                active_coord_counts.append(count)
+
+                    out_channels = sparse_outs[0].shape[-1]
+                    sparse_out_norms = [float(feats.square().sum().detach().cpu().item()) for feats in sparse_outs]
+                    print(
+                        "TRELLIS2_SPARSE_OUT_TRACE",
+                        {
+                            "mode": mode,
+                            "coords_rows": int(coords.shape[0]),
+                            "active_coord_counts": active_coord_counts,
+                            "sparse_out_norms": sparse_out_norms,
+                        },
+                    )
+                    padded = sparse_outs[0].new_zeros((B, N, out_channels))
+                    for out_index, (count, feats_i) in enumerate(zip(active_coord_counts, sparse_outs)):
+                        padded[out_index, :count] = feats_i
+                    dense_out = padded.transpose(1, 2).unsqueeze(-1)
+                elif coords.shape[0] == N:
+                    feats_flat = x_eval.reshape(-1, C)
+                    coords_list = []
+                    for i in range(B):
+                        c = coords.clone()
+                        c[:, 0] = i
+                        coords_list.append(c)
+                    batched_coords = torch.cat(coords_list, dim=0)
+                elif coords.shape[0] == B * N:
+                    feats_flat = x_eval.reshape(-1, C)
+                    batched_coords = coords
+                else:
+                    raise ValueError(
+                        f"Trellis2 expected coords rows {N} or {B * N}, got {coords.shape[0]}"
+                    )
             else:
                 batched_coords = coords
                 feats_flat = x_eval

-            x_st = SparseTensor(feats=feats_flat, coords=batched_coords.to(torch.int32))
+            if dense_out is None:
+                x_st = SparseTensor(feats=feats_flat, coords=batched_coords.to(torch.int32))

-            if mode == "shape_generation":
+            if dense_out is not None:
+                out = dense_out
+            elif mode == "shape_generation":
                 if is_512_run:
                     out = self.img2shape_512(x_st, t_eval, c_eval)
                 else:
@@ -856,23 +1022,152 @@ class Trellis2(nn.Module):
                 if slat is None:
                     raise ValueError("shape_slat can't be None")

-                base_slat_feats = slat[:N]
-                slat_feats_batched = base_slat_feats.repeat(B, 1).to(x_st.device)
+                if slat.ndim == 3:
+                    if coord_counts is not None:
+                        logical_batch = coord_counts.shape[0]
+                        if slat.shape[0] != logical_batch:
+                            raise ValueError(
+                                f"shape_slat batch {slat.shape[0]} doesn't match coord_counts batch {logical_batch}"
+                            )
+                        if B % logical_batch != 0:
+                            raise ValueError(
+                                f"Trellis2 coord_counts batch {logical_batch} doesn't divide latent batch {B}"
+                            )
+                        repeat_factor = B // logical_batch
+                        slat_list = []
+                        for _ in range(repeat_factor):
+                            for i in range(logical_batch):
+                                count = int(coord_counts[i].item())
+                                if slat.shape[1] < count:
+                                    raise ValueError(
+                                        f"shape_slat tokens {slat.shape[1]} can't cover coord count {count} for batch {i}"
+                                    )
+                                slat_list.append(slat[i, :count])
+                        slat_feats_batched = torch.cat(slat_list, dim=0).to(x_st.device)
+                    else:
+                        if slat.shape[0] != B:
+                            raise ValueError(f"shape_slat batch {slat.shape[0]} doesn't match latent batch {B}")
+                        if slat.shape[1] != N:
+                            raise ValueError(f"shape_slat tokens {slat.shape[1]} doesn't match latent tokens {N}")
+                        slat_feats_batched = slat.reshape(B * N, -1).to(x_st.device)
+                else:
+                    base_slat_feats = slat[:N]
+                    slat_feats_batched = base_slat_feats.repeat(B, 1).to(x_st.device)
                 x_st = x_st.replace(feats=torch.cat([x_st.feats, slat_feats_batched], dim=-1))
                 out = self.shape2txt(x_st, t_eval, c_eval)
         else: # structure
             orig_bsz = x.shape[0]
-            if shape_rule and orig_bsz > 1:
-                half = orig_bsz // 2
-                x = x[half:]
-                timestep = timestep[half:] if timestep.shape[0] > 1 else timestep
-            out = self.structure_model(x, timestep, cond if shape_rule and orig_bsz > 1 else context)
-            if shape_rule and orig_bsz > 1:
-                out = out.repeat(2, 1, 1, 1, 1)
+            cond_or_uncond = transformer_options.get("cond_or_uncond") or []
+            batch_groups = len(cond_or_uncond) if len(cond_or_uncond) > 0 and orig_bsz % len(cond_or_uncond) == 0 else 1
+            logical_batch = orig_bsz // batch_groups
+            print(
+                "TRELLIS2_STRUCTURE_INPUT_TRACE",
+                {
+                    "orig_bsz": int(orig_bsz),
+                    "batch_groups": int(batch_groups),
+                    "logical_batch": int(logical_batch),
+                    "cond_or_uncond": cond_or_uncond,
+                    "x_norms": [float(v) for v in x.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
+                    "x_sums": [float(v) for v in x.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
+                    "c_norms": [float(v) for v in context.square().sum(dim=(1, 2)).detach().cpu().tolist()],
+                    "c_sums": [float(v) for v in context.sum(dim=(1, 2)).detach().cpu().tolist()],
+                },
+            )
+
+            if logical_batch > 1:
+                x_groups = x.reshape(batch_groups, logical_batch, *x.shape[1:])
+                if timestep.shape[0] > 1:
+                    t_groups = timestep.reshape(batch_groups, logical_batch, *timestep.shape[1:])
+                else:
+                    t_groups = timestep
+                c_groups = context.reshape(batch_groups, logical_batch, *context.shape[1:])
+
+                if shape_rule and batch_groups > 1:
+                    selected_group_indices = [batch_groups - 1]
+                else:
+                    selected_group_indices = list(range(batch_groups))
+
+                out_groups = []
+                selected_x_norms = []
+                selected_x_sums = []
+                selected_c_norms = []
+                selected_c_sums = []
+                for sample_index in range(logical_batch):
+                    if shape_rule and batch_groups > 1:
+                        half = orig_bsz // 2
+                        x_i = x[half + sample_index].unsqueeze(0)
+                        if timestep.shape[0] > 1:
+                            t_i = timestep[half + sample_index].unsqueeze(0)
+                        else:
+                            t_i = timestep
+                        if cond.shape[0] > 1:
+                            c_i = cond[sample_index].unsqueeze(0)
+                        else:
+                            c_i = cond
+                    else:
+                        x_i = x_groups[selected_group_indices, sample_index]
+                        if timestep.shape[0] > 1:
+                            t_i = t_groups[selected_group_indices, sample_index]
+                        else:
+                            t_i = timestep
+                        c_i = c_groups[selected_group_indices, sample_index]
+                    selected_x_norms.extend(float(v) for v in x_i.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist())
+                    selected_x_sums.extend(float(v) for v in x_i.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist())
+                    selected_c_norms.extend(float(v) for v in c_i.square().sum(dim=(1, 2)).detach().cpu().tolist())
+                    selected_c_sums.extend(float(v) for v in c_i.sum(dim=(1, 2)).detach().cpu().tolist())
+                    out_groups.append(self.structure_model(x_i, t_i, c_i))
+
+                print(
+                    "TRELLIS2_STRUCTURE_SELECTED_TRACE",
+                    {
+                        "selected_group_indices": selected_group_indices,
+                        "selected_x_norms": selected_x_norms,
+                        "selected_x_sums": selected_x_sums,
+                        "selected_c_norms": selected_c_norms,
+                        "selected_c_sums": selected_c_sums,
+                    },
+                )
+
+                out = out_groups[0].new_zeros((orig_bsz, *out_groups[0].shape[1:]))
+                for sample_index, out_sample in enumerate(out_groups):
+                    if shape_rule and batch_groups > 1:
+                        repeated = out_sample[0]
+                        for group_index in range(batch_groups):
+                            out[group_index * logical_batch + sample_index] = repeated
+                    else:
+                        for local_group_index, group_index in enumerate(selected_group_indices):
+                            out[group_index * logical_batch + sample_index] = out_sample[local_group_index]
+            else:
+                if shape_rule and orig_bsz > 1:
+                    half = orig_bsz // 2
+                    x = x[half:]
+                    timestep = timestep[half:] if timestep.shape[0] > 1 else timestep
+                out = self.structure_model(x, timestep, cond if shape_rule and orig_bsz > 1 else context)
+                if shape_rule and orig_bsz > 1:
+                    out = out.repeat(2, 1, 1, 1, 1)
+
+            print(
+                "TRELLIS2_STRUCTURE_OUTPUT_TRACE",
+                {
+                    "out_norms": [float(v) for v in out.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
+                    "out_sums": [float(v) for v in out.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
+                },
+            )

         if not_struct_mode:
-            out = out.feats
-            out = out.view(B, N, -1).transpose(1, 2).unsqueeze(-1)
-            if rule and orig_bsz > 1:
-                out = out.repeat(orig_bsz, 1, 1, 1)
+            if dense_out is None:
+                out = out.feats
+                out = out.view(B, N, -1).transpose(1, 2).unsqueeze(-1)
+            if rule and orig_bsz > B:
+                out = out.repeat(orig_bsz // B, 1, 1, 1)
+            print(
+                "TRELLIS2_DENSE_OUT_TRACE",
+                {
+                    "mode": mode,
+                    "coords_rows": int(coords.shape[0]) if coords is not None else None,
+                    "output_shape": list(out.shape),
+                    "output_norms": [float(v) for v in out.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()],
+                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
+                },
+            )

         return out
diff --git a/comfy/sample.py b/comfy/sample.py
index 653829582..3967fba1b 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -7,6 +7,23 @@ import logging
 import comfy.nested_tensor

 def prepare_noise_inner(latent_image, generator, noise_inds=None):
+    coord_counts = getattr(latent_image, "trellis_coord_counts", None)
+    if coord_counts is not None:
+        noise = torch.zeros(latent_image.size(), dtype=torch.float32, layout=latent_image.layout, device="cpu")
+        base_state = generator.get_state()
+        for i, count in enumerate(coord_counts.tolist()):
+            local_generator = torch.Generator(device="cpu")
+            local_generator.set_state(base_state.clone())
+            sample_noise = torch.randn(
+                [1, latent_image.size(1), int(count), latent_image.size(3)],
+                dtype=torch.float32,
+                layout=latent_image.layout,
+                generator=local_generator,
+                device="cpu",
+            )
+            noise[i:i + 1, :, :int(count), :] = sample_noise
+        return noise.to(dtype=latent_image.dtype)
+
     if noise_inds is None:
         return torch.randn(latent_image.size(), dtype=torch.float32, layout=latent_image.layout, generator=generator, device="cpu").to(dtype=latent_image.dtype)
diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py
index 8121e261b..26cb135e7 100644
--- a/comfy_extras/nodes_trellis2.py
+++ b/comfy_extras/nodes_trellis2.py
@@ -96,6 +96,70 @@ def shape_norm(shape_latent, coords):
     samples = samples * std + mean
     return samples

+
+def infer_batched_coord_layout(coords):
+    if coords.ndim != 2 or coords.shape[1] != 4:
+        raise ValueError(f"Expected Trellis2 coords with shape [N, 4], got {tuple(coords.shape)}")
+
+    if coords.shape[0] == 0:
+        raise ValueError("Trellis2 coords can't be empty")
+
+    batch_ids = coords[:, 0].to(torch.int64)
+    batch_size = int(batch_ids.max().item()) + 1
+    counts = torch.bincount(batch_ids, minlength=batch_size)
+
+    if (counts == 0).any():
+        raise ValueError(f"Non-contiguous Trellis2 batch ids in coords: {batch_ids.unique(sorted=True).tolist()}")
+
+    max_tokens = int(counts.max().item())
+    return batch_size, counts, max_tokens
+
+
+def flatten_batched_sparse_latent(samples, coords, coord_counts):
+    samples = samples.squeeze(-1).transpose(1, 2)
+    if coord_counts is None:
+        return samples.reshape(-1, samples.shape[-1]), coords
+
+    feat_list = []
+    coord_list = []
+    for i in range(coord_counts.shape[0]):
+        count = int(coord_counts[i].item())
+        coords_i = coords[coords[:, 0] == i]
+        if coords_i.shape[0] != count:
+            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
+        feat_list.append(samples[i, :count])
+        coord_list.append(coords_i)
+
+    return torch.cat(feat_list, dim=0), torch.cat(coord_list, dim=0)
+
+
+def split_batched_sparse_latent(samples, coords, coord_counts):
+    samples = samples.squeeze(-1).transpose(1, 2)
+    if coord_counts is None:
+        return [(samples.reshape(-1, samples.shape[-1]), coords)]
+
+    items = []
+    for i in range(coord_counts.shape[0]):
+        count = int(coord_counts[i].item())
+        coords_i = coords[coords[:, 0] == i]
+        if coords_i.shape[0] != count:
+            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
+        items.append((samples[i, :count], coords_i))
+    return items
+
+
+def log_sparse_batch_trace(tag, items):
+    feat_norms = [float(feats.square().sum().detach().cpu().item()) for feats, _ in items]
+    coord_rows = [int(coords_i.shape[0]) for _, coords_i in items]
+    print(
+        tag,
+        {
+            "batch_size": len(items),
+            "coord_rows": coord_rows,
+            "feat_norms": feat_norms,
+        },
+    )
+
+
 def paint_mesh_with_voxels(mesh, voxel_coords, voxel_colors, resolution):
     """
     Generic function to paint a mesh using nearest-neighbor colors from a sparse voxel field.
@@ -169,12 +233,32 @@ class VaeDecodeShapeTrellis(IO.ComfyNode):
         vae = vae.first_stage_model

         coords = samples["coords"]
+        coord_counts = samples.get("coord_counts")
         samples = samples["samples"]
-        samples = samples.squeeze(-1).transpose(1, 2).reshape(-1, 32).to(device)
-        samples = shape_norm(samples, coords)
+        if coord_counts is None:
+            samples, coords = flatten_batched_sparse_latent(samples, coords, coord_counts)
+            samples = shape_norm(samples.to(device), coords.to(device))
+            mesh, subs = vae.decode_shape_slat(samples, resolution)
+        else:
+            split_items = split_batched_sparse_latent(samples, coords, coord_counts)
+            mesh = []
+            subs_per_sample = []
+            for feats_i, coords_i in split_items:
+                coords_i = coords_i.to(device).clone()
+                coords_i[:, 0] = 0
+                sample_i = shape_norm(feats_i.to(device), coords_i)
+                mesh_i, subs_i = vae.decode_shape_slat(sample_i, resolution)
+                mesh.append(mesh_i[0])
+                subs_per_sample.append(subs_i)
+
+            subs = []
+            for stage_index in range(len(subs_per_sample[0])):
+                stage_tensors = [sample_subs[stage_index] for sample_subs in subs_per_sample]
+                feats_list = [stage_tensor.feats for stage_tensor in stage_tensors]
+                coords_list = [stage_tensor.coords for stage_tensor in stage_tensors]
+                subs.append(SparseTensor.from_tensor_list(feats_list, coords_list))

-        mesh, subs = vae.decode_shape_slat(samples, resolution)
         face_list = [m.faces for m in mesh]
         vert_list = [m.vertices for m in mesh]
         if all(v.shape == vert_list[0].shape for v in vert_list) and all(f.shape == face_list[0].shape for f in face_list):
@@ -210,12 +294,14 @@ class VaeDecodeTextureTrellis(IO.ComfyNode):
         vae = vae.first_stage_model

         coords = samples["coords"]
+        coord_counts = samples.get("coord_counts")
         samples = samples["samples"]
-        samples = samples.squeeze(-1).transpose(1, 2).reshape(-1, 32).to(device)
+        samples, coords = flatten_batched_sparse_latent(samples, coords, coord_counts)
+        samples = samples.to(device)

         std = tex_slat_normalization["std"].to(samples)
         mean = tex_slat_normalization["mean"].to(samples)
-        samples = SparseTensor(feats = samples, coords=coords)
+        samples = SparseTensor(feats = samples, coords=coords.to(device))
         samples = samples * std + mean

         voxel = vae.decode_tex_slat(samples, shape_subs)
@@ -273,7 +359,13 @@ class VaeDecodeStructureTrellis2(IO.ComfyNode):
         decoder = decoder.to(load_device)
         samples = samples["samples"]
         samples = samples.to(load_device)
-        decoded = decoder(samples)>0
+        if samples.shape[0] > 1:
+            decoded_items = []
+            for i in range(samples.shape[0]):
+                decoded_items.append(decoder(samples[i:i + 1]) > 0)
+            decoded = torch.cat(decoded_items, dim=0)
+        else:
+            decoded = decoder(samples) > 0
         decoder.to(offload_device)

         current_res = decoded.shape[2]
@@ -305,32 +397,102 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
         device = comfy.model_management.get_torch_device()
         comfy.model_management.load_model_gpu(vae.patcher)

-        feats = shape_latent_512["samples"].squeeze(-1).transpose(1, 2).reshape(-1, 32).to(device)
-        coords_512 = shape_latent_512["coords"].to(device)
-
-        slat = shape_norm(feats, coords_512)
-
+        coord_counts = shape_latent_512.get("coord_counts")
         decoder = vae.first_stage_model.shape_dec
-
-        slat.feats = slat.feats.to(next(decoder.parameters()).dtype)
-        hr_coords = decoder.upsample(slat, upsample_times=4)
-
         lr_resolution = 512
-        hr_resolution = int(target_resolution)
+        target_resolution = int(target_resolution)

-        while True:
-            quant_coords = torch.cat([
-                hr_coords[:, :1],
-                ((hr_coords[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
-            ], dim=1)
-            final_coords = quant_coords.unique(dim=0)
-            num_tokens = final_coords.shape[0]
+        if coord_counts is None:
+            feats, coords_512 = flatten_batched_sparse_latent(
+                shape_latent_512["samples"],
+                shape_latent_512["coords"],
+                coord_counts,
+            )
+            feats = feats.to(device)
+            coords_512 = coords_512.to(device)
+            print(
+                "TRELLIS2_UPSAMPLE_INPUT_TRACE",
+                {
+                    "batch_size": 1,
+                    "coord_rows": [int(coords_512.shape[0])],
+                    "feat_norms": [float(feats.square().sum().detach().cpu().item())],
+                },
+            )
+            slat = shape_norm(feats, coords_512)
+            slat.feats = slat.feats.to(next(decoder.parameters()).dtype)
+            hr_coords = decoder.upsample(slat, upsample_times=4)

-            if num_tokens < max_tokens or hr_resolution <= 1024:
-                break
-            hr_resolution -= 128
+            hr_resolution = target_resolution
+            while True:
+                quant_coords = torch.cat([
+                    hr_coords[:, :1],
+                    ((hr_coords[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
+                ], dim=1)
+                final_coords = quant_coords.unique(dim=0)
+                num_tokens = final_coords.shape[0]

-        return IO.NodeOutput(final_coords,)
+                if num_tokens < max_tokens or hr_resolution <= 1024:
+                    break
+                hr_resolution -= 128
+
+            print(
+                "TRELLIS2_UPSAMPLE_OUTPUT_TRACE",
+                {
+                    "batch_size": 1,
+                    "coord_rows": [int(final_coords.shape[0])],
+                    "hr_resolution": int(hr_resolution),
+                },
+            )
+            return IO.NodeOutput(final_coords,)
+
+        final_coords_list = []
+        items = split_batched_sparse_latent(
+            shape_latent_512["samples"],
+            shape_latent_512["coords"],
+            coord_counts,
+        )
+        log_sparse_batch_trace("TRELLIS2_UPSAMPLE_INPUT_TRACE", items)
+        decoder_dtype = next(decoder.parameters()).dtype
+
+        output_coord_rows = []
+        output_resolutions = []
+        for batch_index, (feats_i, coords_i) in enumerate(items):
+            feats_i = feats_i.to(device)
+            coords_i = coords_i.to(device).clone()
+            coords_i[:, 0] = 0
+            slat_i = shape_norm(feats_i, coords_i)
+            slat_i.feats = slat_i.feats.to(decoder_dtype)
+            hr_coords_i = decoder.upsample(slat_i, upsample_times=4)
+
+            hr_resolution = target_resolution
+            while True:
+                quant_coords_i = torch.cat([
+                    hr_coords_i[:, :1],
+                    ((hr_coords_i[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
+                ], dim=1)
+                final_coords_i = quant_coords_i.unique(dim=0)
+                num_tokens = final_coords_i.shape[0]
+
+                if num_tokens < max_tokens or hr_resolution <= 1024:
+                    break
+                hr_resolution -= 128
+
+            final_coords_i = final_coords_i.clone()
+            final_coords_i[:, 0] = batch_index
+            final_coords_list.append(final_coords_i)
+            output_coord_rows.append(int(final_coords_i.shape[0]))
+            output_resolutions.append(int(hr_resolution))
+
+        print(
+            "TRELLIS2_UPSAMPLE_OUTPUT_TRACE",
+            {
+                "batch_size": len(final_coords_list),
+                "coord_rows": output_coord_rows,
+                "hr_resolution": output_resolutions,
+            },
+        )
+
+        return IO.NodeOutput(torch.cat(final_coords_list, dim=0),)

 dino_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
 dino_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
@@ -406,6 +568,7 @@ class Trellis2Conditioning(IO.ComfyNode):

         cond_512_list = []
         cond_1024_list = []
+        composite_trace = []

         for b in range(batch_size):
             item_image = image[b]
@@ -460,6 +623,14 @@ class Trellis2Conditioning(IO.ComfyNode):

             # to match trellis2 code (quantize -> dequantize)
             composite_uint8 = (composite_np * 255.0).round().clip(0, 255).astype(np.uint8)
+            composite_trace.append(
+                {
+                    "sample_index": int(b),
+                    "shape": list(composite_uint8.shape),
+                    "sum": int(composite_uint8.sum(dtype=np.int64)),
+                    "prefix": composite_uint8[:2, :2, :].reshape(-1).tolist(),
+                }
+            )

             cropped_pil = Image.fromarray(composite_uint8)
@@ -471,6 +642,19 @@ class Trellis2Conditioning(IO.ComfyNode):
         cond_1024_batched = torch.cat(cond_1024_list, dim=0)
         neg_cond_batched = torch.zeros_like(cond_512_batched)
         neg_embeds_batched = torch.zeros_like(cond_1024_batched)
+        print(
+            "TRELLIS2_CONDITIONING_TRACE",
+            {
+                "batch_size": int(batch_size),
+                "cond_512_norms": [float(v) for v in cond_512_batched.square().sum(dim=(1, 2)).detach().cpu().tolist()],
+                "cond_512_sums": [float(v) for v in cond_512_batched.sum(dim=(1, 2)).detach().cpu().tolist()],
+                "cond_512_prefix": cond_512_batched[:, 0, :8].detach().cpu().tolist(),
+                "cond_1024_norms": [float(v) for v in cond_1024_batched.square().sum(dim=(1, 2)).detach().cpu().tolist()],
+                "cond_1024_sums": [float(v) for v in cond_1024_batched.sum(dim=(1, 2)).detach().cpu().tolist()],
+                "cond_1024_prefix": cond_1024_batched[:, 0, :8].detach().cpu().tolist(),
+                "composite_trace": composite_trace,
+            },
+        )

         positive = [[cond_512_batched, {"embeds": cond_1024_batched}]]
         negative = [[neg_cond_batched, {"embeds": neg_embeds_batched}]]
@@ -509,8 +693,32 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
         else:
             raise ValueError(f"Invalid input to EmptyShapeLatent: {type(structure_or_coords)}")
         in_channels = 32
-        # image like format
-        latent = torch.randn(1, in_channels, coords.shape[0], 1)
+        batch_size, coord_counts, max_tokens = infer_batched_coord_layout(coords)
+        if batch_size == 1:
+            coord_counts = None
+            latent = torch.randn(1, in_channels, coords.shape[0], 1)
+        else:
+            latent = torch.zeros(batch_size, in_channels, max_tokens, 1)
+            base_state = torch.random.get_rng_state()
+            for i in range(batch_size):
+                count = int(coord_counts[i].item())
+                generator = torch.Generator(device="cpu")
+                generator.set_state(base_state.clone())
+                latent_i = torch.randn(1, in_channels, count, 1, generator=generator)
+                latent[i, :, :count] = latent_i[0]
+        if coords.shape[0] > 1000:
+            norms = [float(v) for v in latent.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()]
+            print(
+                "TRELLIS2_EMPTY_SHAPE_TRACE",
+                {
+                    "coords_rows": int(coords.shape[0]),
+                    "batch_size": int(batch_size),
+                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
+                    "latent_norms": norms,
+                },
+            )
+        if coord_counts is not None:
+            latent.trellis_coord_counts = coord_counts.clone()
         model = model.clone()
         model.model_options = model.model_options.copy()
         if "transformer_options" in model.model_options:
@@ -519,11 +727,17 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
             model.model_options["transformer_options"] = {}

         model.model_options["transformer_options"]["coords"] = coords
+        if coord_counts is not None:
+            model.model_options["transformer_options"]["coord_counts"] = coord_counts
         if is_512_pass:
             model.model_options["transformer_options"]["generation_mode"] = "shape_generation_512"
         else:
             model.model_options["transformer_options"]["generation_mode"] = "shape_generation"
-        return IO.NodeOutput({"samples": latent, "coords": coords, "type": "trellis2"}, model)
+        output = {"samples": latent, "coords": coords, "type": "trellis2"}
+        if coord_counts is not None:
+            output["coord_counts"] = coord_counts
+        output["batch_index"] = [0] * batch_size
+        return IO.NodeOutput(output, model)

 class EmptyTextureLatentTrellis2(IO.ComfyNode):
     @classmethod
@@ -553,10 +767,45 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode):
             coords = structure_or_coords.int()

         shape_latent = shape_latent["samples"]
+        batch_size, coord_counts, max_tokens = infer_batched_coord_layout(coords)
         if shape_latent.ndim == 4:
-            shape_latent = shape_latent.squeeze(-1).transpose(1, 2).reshape(-1, channels)
+            if shape_latent.shape[0] != batch_size:
+                raise ValueError(
+                    f"shape_latent batch {shape_latent.shape[0]} doesn't match coords batch {batch_size}"
+                )
+            shape_latent = shape_latent.squeeze(-1).transpose(1, 2)
+            if shape_latent.shape[1] < max_tokens:
+                raise ValueError(
+                    f"shape_latent tokens {shape_latent.shape[1]} can't cover coords max tokens {max_tokens}"
+                )

-        latent = torch.randn(1, channels, coords.shape[0], 1)
+        if batch_size == 1:
+            coord_counts = None
+            latent = torch.randn(1, channels, coords.shape[0], 1)
+        else:
+            latent = torch.zeros(batch_size, channels, max_tokens, 1)
+            base_state = torch.random.get_rng_state()
+            for i in range(batch_size):
+                count = int(coord_counts[i].item())
+                generator = torch.Generator(device="cpu")
+                generator.set_state(base_state.clone())
+                latent_i = torch.randn(1, channels, count, 1, generator=generator)
+                latent[i, :, :count] = latent_i[0]
+        if coords.shape[0] > 1000:
+            norms = [float(v) for v in latent.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()]
+            shape_norms = [float(v) for v in shape_latent.square().sum(dim=(1, 2)).detach().cpu().tolist()] if shape_latent.ndim == 3 else None
+            print(
+                "TRELLIS2_EMPTY_TEXTURE_TRACE",
+                {
+                    "coords_rows": int(coords.shape[0]),
+                    "batch_size": int(batch_size),
+                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
+                    "latent_norms": norms,
+                    "shape_latent_norms": shape_norms,
+                },
+            )
+        if coord_counts is not None:
+            latent.trellis_coord_counts = coord_counts.clone()
         model = model.clone()
         model.model_options = model.model_options.copy()
         if "transformer_options" in model.model_options:
@@ -565,9 +814,15 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode):
             model.model_options["transformer_options"] = {}

         model.model_options["transformer_options"]["coords"] = coords
+        if coord_counts is not None:
+            model.model_options["transformer_options"]["coord_counts"] = coord_counts
         model.model_options["transformer_options"]["generation_mode"] = "texture_generation"
         model.model_options["transformer_options"]["shape_slat"] = shape_latent
-        return IO.NodeOutput({"samples": latent, "coords": coords, "type": "trellis2"}, model)
+        output = {"samples": latent, "coords": coords, "type": "trellis2"}
+        if coord_counts is not None:
+            output["coord_counts"] = coord_counts
+        output["batch_index"] = [0] * batch_size
+        return IO.NodeOutput(output, model)


 class EmptyStructureLatentTrellis2(IO.ComfyNode):
@@ -587,8 +842,15 @@ class EmptyStructureLatentTrellis2(IO.ComfyNode):
     def execute(cls, batch_size):
         in_channels = 8
         resolution = 16
-        latent = torch.randn(batch_size, in_channels, resolution, resolution, resolution)
-        return IO.NodeOutput({"samples": latent, "type": "trellis2"})
+        generator = torch.Generator(device="cpu")
+        generator.manual_seed(11426)
+        latent = torch.randn(1, in_channels, resolution, resolution, resolution, generator=generator).repeat(batch_size, 1, 1, 1, 1)
+        norms = [float(v) for v in latent.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()]
+        print("TRELLIS2_EMPTY_STRUCTURE_TRACE", {"batch_size": int(batch_size), "latent_norms": norms})
+        output = {"samples": latent, "type": "trellis2"}
+        if batch_size > 1:
+            output["batch_index"] = [0] * batch_size
+        return IO.NodeOutput(output)

 def simplify_fn(vertices, faces, colors=None, target=100000):
     if vertices.ndim == 3:
From 49c1adeed6c91b790bf1ab87dffbec6a6e1eae6f Mon Sep 17 00:00:00 2001
From: John Pollock
Date: Mon, 20 Apr 2026 12:15:49 -0500
Subject: [PATCH 2/8] Fix Trellis PR review regressions

---
 comfy/ldm/trellis2/model.py    |  86 --------------------
 comfy/sample.py                |  29 +++++--
 comfy_extras/nodes_trellis2.py | 139 +++++++++++----------------------
 3 files changed, 68 insertions(+), 186 deletions(-)

diff --git a/comfy/ldm/trellis2/model.py b/comfy/ldm/trellis2/model.py
index 76dbacc93..f61c50629 100644
--- a/comfy/ldm/trellis2/model.py
+++ b/comfy/ldm/trellis2/model.py
@@ -829,21 +829,6 @@ class Trellis2(nn.Module):
                 t_eval = timestep
                 c_eval = context

-            x_eval_norms = [float(v) for v in x_eval.square().sum(dim=(1, 2)).detach().cpu().tolist()]
-            c_eval_norms = [float(v) for v in c_eval.square().sum(dim=(1, 2)).detach().cpu().tolist()]
-            print(
-                "TRELLIS2_NOT_STRUCT_INPUT_TRACE",
-                {
-                    "mode": mode,
-                    "orig_bsz": int(orig_bsz),
-                    "logical_batch": int(logical_batch),
-                    "rule": bool(rule),
-                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
-                    "x_eval_norms": x_eval_norms,
-                    "c_eval_norms": c_eval_norms,
-                },
-            )
-
             B, N, C = x_eval.shape

             if mode in ["shape_generation", "texture_generation"]:
@@ -878,16 +863,6 @@ class Trellis2(nn.Module):
                                 coord_batches.append(coords_rep)
                                 index_batch.append(out_index)

-                            print(
-                                "TRELLIS2_GROUPED_INPUT_TRACE",
-                                {
-                                    "mode": mode,
-                                    "sample_index": int(i),
-                                    "coord_count": int(count),
-                                    "feat_norms": [float(v.square().sum().detach().cpu().item()) for v in feat_batches],
-                                },
-                            )
-
                             x_st_i = SparseTensor(
                                 feats=torch.cat(feat_batches, dim=0),
                                 coords=torch.cat(coord_batches, dim=0).to(torch.int32),
@@ -972,16 +947,6 @@ class Trellis2(nn.Module):
                                 active_coord_counts.append(count)

                     out_channels = sparse_outs[0].shape[-1]
-                    sparse_out_norms = [float(feats.square().sum().detach().cpu().item()) for feats in sparse_outs]
-                    print(
-                        "TRELLIS2_SPARSE_OUT_TRACE",
-                        {
-                            "mode": mode,
-                            "coords_rows": int(coords.shape[0]),
-                            "active_coord_counts": active_coord_counts,
-                            "sparse_out_norms": sparse_out_norms,
-                        },
-                    )
                     padded = sparse_outs[0].new_zeros((B, N, out_channels))
                     for out_index, (count, feats_i) in enumerate(zip(active_coord_counts, sparse_outs)):
                         padded[out_index, :count] = feats_i
@@ -1060,20 +1025,6 @@ class Trellis2(nn.Module):
             cond_or_uncond = transformer_options.get("cond_or_uncond") or []
             batch_groups = len(cond_or_uncond) if len(cond_or_uncond) > 0 and orig_bsz % len(cond_or_uncond) == 0 else 1
             logical_batch = orig_bsz // batch_groups
-            print(
-                "TRELLIS2_STRUCTURE_INPUT_TRACE",
-                {
-                    "orig_bsz": int(orig_bsz),
-                    "batch_groups": int(batch_groups),
-                    "logical_batch": int(logical_batch),
-                    "cond_or_uncond": cond_or_uncond,
-                    "x_norms": [float(v) for v in x.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
-                    "x_sums": [float(v) for v in x.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
-                    "c_norms": [float(v) for v in context.square().sum(dim=(1, 2)).detach().cpu().tolist()],
-                    "c_sums": [float(v) for v in context.sum(dim=(1, 2)).detach().cpu().tolist()],
-                },
-            )
-
             if logical_batch > 1:
                 x_groups = x.reshape(batch_groups, logical_batch, *x.shape[1:])
                 if timestep.shape[0] > 1:
@@ -1088,10 +1039,6 @@ class Trellis2(nn.Module):
                     selected_group_indices = list(range(batch_groups))

                 out_groups = []
-                selected_x_norms = []
-                selected_x_sums = []
-                selected_c_norms = []
-                selected_c_sums = []
                 for sample_index in range(logical_batch):
                     if shape_rule and batch_groups > 1:
                         half = orig_bsz // 2
@@ -1111,23 +1058,8 @@ class Trellis2(nn.Module):
                         else:
                             t_i = timestep
                         c_i = c_groups[selected_group_indices, sample_index]
-                    selected_x_norms.extend(float(v) for v in x_i.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist())
-                    selected_x_sums.extend(float(v) for v in x_i.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist())
-                    selected_c_norms.extend(float(v) for v in c_i.square().sum(dim=(1, 2)).detach().cpu().tolist())
-                    selected_c_sums.extend(float(v) for v in c_i.sum(dim=(1, 2)).detach().cpu().tolist())
                     out_groups.append(self.structure_model(x_i, t_i, c_i))

-                print(
-                    "TRELLIS2_STRUCTURE_SELECTED_TRACE",
-                    {
-                        "selected_group_indices": selected_group_indices,
-                        "selected_x_norms": selected_x_norms,
-                        "selected_x_sums": selected_x_sums,
-                        "selected_c_norms": selected_c_norms,
-                        "selected_c_sums": selected_c_sums,
-                    },
-                )
-
                 out = out_groups[0].new_zeros((orig_bsz, *out_groups[0].shape[1:]))
                 for sample_index, out_sample in enumerate(out_groups):
                     if shape_rule and batch_groups > 1:
@@ -1146,28 +1078,10 @@ class Trellis2(nn.Module):
                 if shape_rule and orig_bsz > 1:
                     out = out.repeat(2, 1, 1, 1, 1)

-            print(
-                "TRELLIS2_STRUCTURE_OUTPUT_TRACE",
-                {
-                    "out_norms": [float(v) for v in out.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
-                    "out_sums": [float(v) for v in out.sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()],
-                },
-            )
-
         if not_struct_mode:
             if dense_out is None:
                 out = out.feats
                 out = out.view(B, N, -1).transpose(1, 2).unsqueeze(-1)
             if rule and orig_bsz > B:
                 out = out.repeat(orig_bsz // B, 1, 1, 1)
-            print(
-                "TRELLIS2_DENSE_OUT_TRACE",
-                {
-                    "mode": mode,
-                    "coords_rows": int(coords.shape[0]) if coords is not None else None,
-                    "output_shape": list(out.shape),
-                    "output_norms": [float(v) for v in out.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()],
-                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
-                },
-            )

         return out
diff --git a/comfy/sample.py b/comfy/sample.py
index 3967fba1b..7251aa799 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -10,18 +10,37 @@ def prepare_noise_inner(latent_image, generator, noise_inds=None):
     coord_counts = getattr(latent_image, "trellis_coord_counts", None)
     if coord_counts is not None:
         noise = torch.zeros(latent_image.size(), dtype=torch.float32, layout=latent_image.layout, device="cpu")
-        base_state = generator.get_state()
-        for i, count in enumerate(coord_counts.tolist()):
+        if noise_inds is None:
+            noise_inds = np.arange(latent_image.size(0), dtype=np.int64)
+        else:
+            noise_inds = np.asarray(noise_inds, dtype=np.int64)
+
+        unique_inds = np.unique(noise_inds)
+        first_indices = {int(unique_index): int(np.flatnonzero(noise_inds == unique_index)[0]) for unique_index in unique_inds.tolist()}
+        index_states = {}
+        for unique_index in sorted(first_indices):
+            index_states[unique_index] = generator.get_state().clone()
+            count = int(coord_counts[first_indices[unique_index]].item())
+            torch.randn(
+                [1, latent_image.size(1), count, latent_image.size(3)],
+                dtype=torch.float32,
+                layout=latent_image.layout,
+                generator=generator,
+                device="cpu",
+            )
+
+        for batch_index, noise_index in enumerate(noise_inds.tolist()):
+            count = int(coord_counts[batch_index].item())
             local_generator = torch.Generator(device="cpu")
-            local_generator.set_state(base_state.clone())
+            local_generator.set_state(index_states[int(noise_index)].clone())
             sample_noise = torch.randn(
-                [1, latent_image.size(1), int(count), latent_image.size(3)],
+                [1, latent_image.size(1), count, latent_image.size(3)],
                 dtype=torch.float32,
                 layout=latent_image.layout,
                 generator=local_generator,
                 device="cpu",
             )
-            noise[i:i + 1, :, :int(count), :] = sample_noise
+            noise[batch_index:batch_index + 1, :, :count, :] = sample_noise
         return noise.to(dtype=latent_image.dtype)

     if noise_inds is None:
@@ -76,6 +70,8 @@ def prepare_noise(latent_image, seed, noise_inds=None):
 def fix_empty_latent_channels(model, latent_image, downscale_ratio_spacial=None):
     if latent_image.is_nested:
         return latent_image
+    if getattr(latent_image, "trellis_skip_empty_fix", False):
+        return latent_image
     latent_format = model.get_model_object("latent_format") #Resize the empty latent image so it has the right number of channels
     if torch.count_nonzero(latent_image) == 0:
         if latent_format.latent_channels != latent_image.shape[1]:
diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py
index 26cb135e7..621cc9586 100644
--- a/comfy_extras/nodes_trellis2.py
+++ b/comfy_extras/nodes_trellis2.py
@@ -148,18 +148,6 @@ def split_batched_sparse_latent(samples, coords, coord_counts):
     return items


-def log_sparse_batch_trace(tag, items):
-    feat_norms = [float(feats.square().sum().detach().cpu().item()) for feats, _ in items]
-    coord_rows = [int(coords_i.shape[0]) for _, coords_i in items]
-    print(
-        tag,
-        {
-            "batch_size": len(items),
-            "coord_rows": coord_rows,
-            "feat_norms": feat_norms,
-        },
-    )
-
 def paint_mesh_with_voxels(mesh, voxel_coords, voxel_colors, resolution):
     """
     Generic function to paint a mesh using nearest-neighbor colors from a sparse voxel field.
@@ -410,14 +398,6 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
             )
             feats = feats.to(device)
             coords_512 = coords_512.to(device)
-            print(
-                "TRELLIS2_UPSAMPLE_INPUT_TRACE",
-                {
-                    "batch_size": 1,
-                    "coord_rows": [int(coords_512.shape[0])],
-                    "feat_norms": [float(feats.square().sum().detach().cpu().item())],
-                },
-            )
             slat = shape_norm(feats, coords_512)
             slat.feats = slat.feats.to(next(decoder.parameters()).dtype)
             hr_coords = decoder.upsample(slat, upsample_times=4)
@@ -435,27 +415,18 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
                     break
                 hr_resolution -= 128

-            print(
-                "TRELLIS2_UPSAMPLE_OUTPUT_TRACE",
-                {
-                    "batch_size": 1,
-                    "coord_rows": [int(final_coords.shape[0])],
-                    "hr_resolution": int(hr_resolution),
-                },
-            )
             return IO.NodeOutput(final_coords,)

-        final_coords_list = []
         items = split_batched_sparse_latent(
             shape_latent_512["samples"],
             shape_latent_512["coords"],
             coord_counts,
         )
-        log_sparse_batch_trace("TRELLIS2_UPSAMPLE_INPUT_TRACE", items)
         decoder_dtype = next(decoder.parameters()).dtype

-        output_coord_rows = []
+        final_coords_list = []
         output_resolutions = []
+        output_coord_counts = []
         for batch_index, (feats_i, coords_i) in enumerate(items):
             feats_i = feats_i.to(device)
             coords_i = coords_i.to(device).clone()
@@ -480,19 +451,14 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
             final_coords_i = final_coords_i.clone()
             final_coords_i[:, 0] = batch_index
             final_coords_list.append(final_coords_i)
-            output_coord_rows.append(int(final_coords_i.shape[0]))
             output_resolutions.append(int(hr_resolution))
+            output_coord_counts.append(int(final_coords_i.shape[0]))

-        print(
-            "TRELLIS2_UPSAMPLE_OUTPUT_TRACE",
-            {
-                "batch_size": len(final_coords_list),
-                "coord_rows": output_coord_rows,
-                "hr_resolution": output_resolutions,
-            },
-        )
-
-        return IO.NodeOutput(torch.cat(final_coords_list, dim=0),)
+        return IO.NodeOutput({
+            "coords": torch.cat(final_coords_list, dim=0),
+            "coord_counts": torch.tensor(output_coord_counts, dtype=torch.int64),
+            "resolutions": torch.tensor(output_resolutions, dtype=torch.int64),
+        },)

 dino_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
 dino_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
@@ -568,7 +534,6 @@ class Trellis2Conditioning(IO.ComfyNode):

         cond_512_list = []
         cond_1024_list = []
-        composite_trace = []

         for b in range(batch_size):
             item_image = image[b]
@@ -623,14 +588,6 @@ class Trellis2Conditioning(IO.ComfyNode):

             # to match trellis2 code (quantize -> dequantize)
             composite_uint8 = (composite_np * 255.0).round().clip(0, 255).astype(np.uint8)
-            composite_trace.append(
-                {
-                    "sample_index": int(b),
-                    "shape": list(composite_uint8.shape),
-                    "sum": int(composite_uint8.sum(dtype=np.int64)),
-                    "prefix": composite_uint8[:2, :2, :].reshape(-1).tolist(),
-                }
-            )

             cropped_pil = Image.fromarray(composite_uint8)
@@ -642,19 +599,6 @@ class Trellis2Conditioning(IO.ComfyNode):
         cond_1024_batched = torch.cat(cond_1024_list, dim=0)
         neg_cond_batched = torch.zeros_like(cond_512_batched)
         neg_embeds_batched = torch.zeros_like(cond_1024_batched)
-        print(
-            "TRELLIS2_CONDITIONING_TRACE",
-            {
-                "batch_size": int(batch_size),
-                "cond_512_norms": [float(v) for v in cond_512_batched.square().sum(dim=(1, 2)).detach().cpu().tolist()],
-                "cond_512_sums": [float(v) for v in cond_512_batched.sum(dim=(1, 2)).detach().cpu().tolist()],
-                "cond_512_prefix": cond_512_batched[:, 0, :8].detach().cpu().tolist(),
-                "cond_1024_norms": [float(v) for v in cond_1024_batched.square().sum(dim=(1, 2)).detach().cpu().tolist()],
-                "cond_1024_sums": [float(v) for v in cond_1024_batched.sum(dim=(1, 2)).detach().cpu().tolist()],
-                "cond_1024_prefix": cond_1024_batched[:, 0, :8].detach().cpu().tolist(),
-                "composite_trace": composite_trace,
-            },
-        )

         positive = [[cond_512_batched, {"embeds": cond_1024_batched}]]
         negative = [[neg_cond_batched, {"embeds": neg_embeds_batched}]]
@@ -680,12 +624,20 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
     def execute(cls, structure_or_coords, model):
         # to accept the upscaled coords
         is_512_pass = False
+        coord_counts = None
+        coord_resolutions = None
         if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4:
             decoded = structure_or_coords.data.unsqueeze(1)
             coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int()
             is_512_pass = True

+        elif isinstance(structure_or_coords, dict):
+            coords = structure_or_coords["coords"].int()
+            coord_counts = structure_or_coords.get("coord_counts")
+            coord_resolutions = structure_or_coords.get("resolutions")
+            is_512_pass = False
+
         elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2:
             coords = structure_or_coords.int()
             is_512_pass = False
@@ -693,7 +645,15 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
         else:
             raise ValueError(f"Invalid input to EmptyShapeLatent: {type(structure_or_coords)}")
         in_channels = 32
-        batch_size, coord_counts, max_tokens = infer_batched_coord_layout(coords)
+        batch_size, inferred_coord_counts, max_tokens = infer_batched_coord_layout(coords)
+        if coord_counts is not None:
+            coord_counts = coord_counts.to(dtype=torch.int64, device=coords.device)
+            if coord_counts.shape != inferred_coord_counts.shape or not torch.equal(coord_counts, inferred_coord_counts):
+                raise ValueError(
+                    f"Trellis2 coord_counts metadata {coord_counts.tolist()} does not match coords layout {inferred_coord_counts.tolist()}"
+                )
+        else:
+            coord_counts = inferred_coord_counts
         if batch_size == 1:
             coord_counts = None
             latent = torch.randn(1, in_channels, coords.shape[0], 1)
@@ -706,17 +666,6 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
                 generator.set_state(base_state.clone())
                 latent_i = torch.randn(1, in_channels, count, 1, generator=generator)
                 latent[i, :, :count] = latent_i[0]
-        if coords.shape[0] > 1000:
-            norms = [float(v) for v in latent.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()]
-            print(
-                "TRELLIS2_EMPTY_SHAPE_TRACE",
-                {
-                    "coords_rows": int(coords.shape[0]),
-                    "batch_size": int(batch_size),
-                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
-                    "latent_norms": norms,
-                },
-            )
         if coord_counts is not None:
             latent.trellis_coord_counts = coord_counts.clone()
         model = model.clone()
@@ -729,6 +678,8 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
         model.model_options["transformer_options"]["coords"] = coords
         if coord_counts is not None:
             model.model_options["transformer_options"]["coord_counts"] = coord_counts
+        if coord_resolutions is not None:
+            model.model_options["transformer_options"]["coord_resolutions"] = coord_resolutions
         if is_512_pass:
             model.model_options["transformer_options"]["generation_mode"] = "shape_generation_512"
         else:
@@ -736,6 +687,8 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode):
         output = {"samples": latent, "coords": coords, "type": "trellis2"}
         if coord_counts is not None:
             output["coord_counts"] = coord_counts
+        if coord_resolutions is not None:
+            output["coord_resolutions"] = coord_resolutions
         output["batch_index"] = [0] * batch_size
         return IO.NodeOutput(output, model)
@@ -759,15 +712,28 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode):
     @classmethod
     def execute(cls, structure_or_coords, shape_latent, model):
         channels = 32
+        coord_counts = None
         if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4:
             decoded = structure_or_coords.data.unsqueeze(1)
             coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int()

+        elif isinstance(structure_or_coords, dict):
+            coords = structure_or_coords["coords"].int()
+            coord_counts = structure_or_coords.get("coord_counts")
+
         elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2:
             coords = structure_or_coords.int()

         shape_latent = shape_latent["samples"]
-        batch_size, coord_counts, max_tokens = infer_batched_coord_layout(coords)
+        batch_size, inferred_coord_counts, max_tokens = infer_batched_coord_layout(coords)
+        if coord_counts is not None:
+            coord_counts = coord_counts.to(dtype=torch.int64, device=coords.device)
+            if coord_counts.shape != inferred_coord_counts.shape or not torch.equal(coord_counts, inferred_coord_counts):
+                raise ValueError(
+                    f"Trellis2 coord_counts metadata {coord_counts.tolist()} does not match coords layout {inferred_coord_counts.tolist()}"
+                )
+        else:
+            coord_counts = inferred_coord_counts
         if shape_latent.ndim == 4:
             if shape_latent.shape[0] != batch_size:
                 raise ValueError(
@@ -791,19 +757,6 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode):
                 generator.set_state(base_state.clone())
                 latent_i = torch.randn(1, channels, count, 1, generator=generator)
                 latent[i, :, :count] = latent_i[0]
-        if coords.shape[0] > 1000:
-            norms = [float(v) for v in latent.squeeze(-1).square().sum(dim=(1, 2)).detach().cpu().tolist()]
-            shape_norms = [float(v) for v in shape_latent.square().sum(dim=(1, 2)).detach().cpu().tolist()] if shape_latent.ndim == 3 else None
-            print(
-                "TRELLIS2_EMPTY_TEXTURE_TRACE",
-                {
-                    "coords_rows": int(coords.shape[0]),
-                    "batch_size": int(batch_size),
-                    "coord_counts": coord_counts.tolist() if coord_counts is not None else None,
-                    "latent_norms": norms,
-                    "shape_latent_norms": shape_norms,
-                },
-            )
         if coord_counts is not None:
             latent.trellis_coord_counts = coord_counts.clone()
         model = model.clone()
@@ -842,11 +795,7 @@ class EmptyStructureLatentTrellis2(IO.ComfyNode):
     def execute(cls, batch_size):
         in_channels = 8
         resolution = 16
-        generator = torch.Generator(device="cpu")
-        generator.manual_seed(11426)
-        latent = torch.randn(1, in_channels, resolution, resolution, resolution, generator=generator).repeat(batch_size, 1, 1, 1, 1)
-        norms = [float(v) for v in latent.square().sum(dim=(1, 2, 3, 4)).detach().cpu().tolist()]
-        print("TRELLIS2_EMPTY_STRUCTURE_TRACE", {"batch_size": int(batch_size), "latent_norms": norms})
+        latent = torch.randn(1, in_channels, resolution, resolution, resolution).repeat(batch_size, 1, 1, 1, 1)
         output = {"samples": latent, "type": "trellis2"}
         if batch_size > 1:
             output["batch_index"] = [0] * batch_size
From 7d98cc1305612becdf0baa734997f84eb296a49d Mon Sep 17 00:00:00 2001
From: John Pollock
Date: Mon, 20 Apr 2026 14:29:07 -0500
Subject: [PATCH 3/8] Fix Trellis seeded sparse batch semantics

---
 comfy/ldm/trellis2/model.py                   | 183 +++++++-----------
 comfy/sample.py                               |  34 ++--
 comfy_extras/nodes_trellis2.py                | 161 ++++++++++-----
 .../comfy_extras_test/nodes_trellis2_test.py  |  83 ++++++++
 tests-unit/comfy_test/sample_test.py          |  47 +++++
 5 files changed, 333 insertions(+), 175 deletions(-)
 create mode 100644 tests-unit/comfy_test/sample_test.py

diff --git a/comfy/ldm/trellis2/model.py b/comfy/ldm/trellis2/model.py
index f61c50629..15939e5c6 100644
--- a/comfy/ldm/trellis2/model.py
+++ b/comfy/ldm/trellis2/model.py
@@ -813,6 +813,14 @@ class Trellis2(nn.Module):
         shape_rule = sigmas < self.guidance_interval[0] or sigmas > self.guidance_interval[1]
         txt_rule = sigmas < self.guidance_interval_txt[0] or sigmas > self.guidance_interval_txt[1]
         dense_out = None
+        cond_or_uncond = transformer_options.get("cond_or_uncond") or []
+
+        def cond_group_indices(batch_groups):
+            if len(cond_or_uncond) == batch_groups:
+                cond_groups = [i for i, marker in enumerate(cond_or_uncond) if marker == 0]
+                if len(cond_groups) > 0:
+                    return cond_groups
+            return [batch_groups - 1]

         if not_struct_mode:
             orig_bsz = x.shape[0]
@@ -820,10 +828,17 @@ class Trellis2(nn.Module):
             logical_batch = coord_counts.shape[0] if coord_counts is not None else 1
             if rule and orig_bsz > logical_batch:
-                half = orig_bsz // 2
-                x_eval = x[half:]
-                t_eval = timestep[half:] if timestep.shape[0] > 1 else timestep
-                c_eval = cond
+                batch_groups = orig_bsz // logical_batch
+                selected_groups = cond_group_indices(batch_groups)
+                x_groups = x.reshape(batch_groups, logical_batch, *x.shape[1:])
+                x_eval = x_groups[selected_groups].reshape(-1, *x.shape[1:])
+                if timestep.shape[0] > 1:
+                    t_groups = timestep.reshape(batch_groups, logical_batch, *timestep.shape[1:])
+                    t_eval = t_groups[selected_groups].reshape(-1, *timestep.shape[1:])
+                else:
+                    t_eval = timestep
+                c_groups = context.reshape(batch_groups, logical_batch, *context.shape[1:])
+                c_eval = c_groups[selected_groups].reshape(-1, *context.shape[1:])
             else:
                 x_eval = x
                 t_eval = timestep
@@ -838,113 +853,62 @@ class Trellis2(nn.Module):
                         raise ValueError(
                             f"Trellis2 coord_counts batch {logical_batch} doesn't divide latent batch {B}"
                         )
+                    batch_ids = coords[:, 0].to(torch.int64)
+                    order = torch.argsort(batch_ids, stable=True)
+                    sorted_coords = coords.index_select(0, order)
+                    sorted_batch_ids = batch_ids.index_select(0, order)
+                    offsets = coord_counts.cumsum(0) - coord_counts
+                    coords_by_batch = []
+                    for i in range(logical_batch):
+                        count = int(coord_counts[i].item())
+                        start = int(offsets[i].item())
+                        coords_i = sorted_coords[start:start + count]
+                        ids_i = sorted_batch_ids[start:start + count]
+                        if coords_i.shape[0] != count or not torch.all(ids_i == i):
+                            raise ValueError(
+                                f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}"
+                            )
+                        coords_by_batch.append(coords_i)
                     repeat_factor = B // logical_batch
                     sparse_outs = []
                     active_coord_counts = []
-                    if mode == "shape_generation" and repeat_factor > 1:
-                        grouped_outs = []
-                        grouped_counts = []
+                    for rep in range(repeat_factor):
                         for i in range(logical_batch):
+                            out_index = rep * logical_batch + i
                             count = int(coord_counts[i].item())
-                            coords_i = coords[coords[:, 0] == i].clone()
-                            if coords_i.shape[0] != count:
-                                raise ValueError(
-                                    f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}"
-                                )
-
-                            feat_batches = []
-                            coord_batches = []
-                            index_batch = []
-                            for rep in range(repeat_factor):
-                                out_index = rep * logical_batch + i
-                                feat_batches.append(x_eval[out_index, :count])
-                                coords_rep = coords_i.clone()
-                                coords_rep[:, 0] = rep
-                                coord_batches.append(coords_rep)
-                                index_batch.append(out_index)
-
-                            x_st_i = SparseTensor(
-                                feats=torch.cat(feat_batches, dim=0),
-                                coords=torch.cat(coord_batches, dim=0).to(torch.int32),
-                            )
-                            index_tensor = torch.tensor(index_batch, device=x_eval.device, dtype=torch.long)
-                            if t_eval.shape[0] > 1:
-                                t_i = t_eval.index_select(0, index_tensor)
-                            else:
-                                t_i = t_eval
-                            if c_eval.shape[0] > 1:
-                                c_i = c_eval.index_select(0, index_tensor)
-                            else:
-                                c_i = c_eval
-
-                            if is_512_run:
-                                sparse_out = self.img2shape_512(x_st_i, t_i, c_i)
-                            else:
-                                sparse_out = self.img2shape(x_st_i, t_i, c_i)
-
-                            feats_group, coords_group = sparse_out.to_tensor_list()
-                            if len(feats_group) != repeat_factor:
-                                raise ValueError(
-                                    f"Trellis2 expected {repeat_factor} sparse output groups for batch {i}, got {len(feats_group)}"
-                                )
-                            for rep, (feats_rep, coords_rep) in enumerate(zip(feats_group, coords_group)):
-                                if feats_rep.shape[0] != count:
-                                    raise ValueError(
-                                        f"Trellis2 sparse output rows for batch {i} rep {rep} expected {count}, got {feats_rep.shape[0]}"
-                                    )
-                                if coords_rep.shape[0] != count:
-                                    raise ValueError(
-                                        f"Trellis2 sparse output coords for batch {i} rep {rep} expected {count}, got {coords_rep.shape[0]}"
-                                    )
-                            grouped_outs.append(feats_group)
-                            grouped_counts.append(count)
-
-                        for rep in range(repeat_factor):
-                            for i in range(logical_batch):
-                                sparse_outs.append(grouped_outs[i][rep])
-                                active_coord_counts.append(grouped_counts[i])
-                    else:
-                        for rep in range(repeat_factor):
-                            for i in range(logical_batch):
-                                out_index = rep * logical_batch + i
-                                count = int(coord_counts[i].item())
-                                coords_i = coords[coords[:, 0] == i].clone()
-                                if coords_i.shape[0] != count:
-                                    raise ValueError(
-                                        f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}"
-                                    )
-                                coords_i[:, 0] = 0
-                                feats_i = x_eval[out_index, :count]
-                                x_st_i = SparseTensor(feats=feats_i, coords=coords_i.to(torch.int32))
-                                t_i = t_eval[out_index].unsqueeze(0) if t_eval.shape[0] > 1 else t_eval
-                                c_i = c_eval[out_index].unsqueeze(0) if c_eval.shape[0] > 1 else c_eval
+                            coords_i = coords_by_batch[i].clone()
+                            coords_i[:, 0] = 0
+                            feats_i = x_eval[out_index, :count].clone()
+                            x_st_i = SparseTensor(feats=feats_i, coords=coords_i.to(torch.int32))
+                            t_i = t_eval[out_index].unsqueeze(0).clone() if t_eval.shape[0] > 1 else t_eval
+                            c_i = c_eval[out_index].unsqueeze(0).clone() if c_eval.shape[0] > 1 else c_eval

-                                if mode == "shape_generation":
-                                    if is_512_run:
-                                        sparse_out = self.img2shape_512(x_st_i, t_i, c_i)
-                                    else:
-                                        sparse_out = self.img2shape(x_st_i, t_i, c_i)
-                                else:
-                                    slat = transformer_options.get("shape_slat")
-                                    if slat is None:
-                                        raise ValueError("shape_slat can't be None")
-                                    if slat.ndim == 3:
-                                        if slat.shape[0] != logical_batch:
-                                            raise ValueError(
-                                                f"shape_slat batch {slat.shape[0]} doesn't match coord_counts batch {logical_batch}"
-                                            )
-                                        if slat.shape[1] < count:
-                                            raise ValueError(
-                                                f"shape_slat tokens {slat.shape[1]} can't cover coord count {count} for batch {i}"
-                                            )
-                                        slat_feats = slat[i, :count].to(x_st_i.device)
-                                    else:
-                                        slat_feats = slat[:count].to(x_st_i.device)
-                                    x_st_i = x_st_i.replace(feats=torch.cat([x_st_i.feats, slat_feats], dim=-1))
-                                    sparse_out = self.shape2txt(x_st_i, t_i, c_i)
+                            if mode == "shape_generation":
+                                if is_512_run:
+                                    sparse_out = self.img2shape_512(x_st_i, t_i, c_i)
+                                else:
+                                    sparse_out = self.img2shape(x_st_i, t_i, c_i)
+                            else:
+                                slat = transformer_options.get("shape_slat")
+                                if slat is None:
+                                    raise ValueError("shape_slat can't be None")
+                                if slat.ndim == 3:
+                                    if slat.shape[0] != logical_batch:
+                                        raise ValueError(
+                                            f"shape_slat batch {slat.shape[0]} doesn't match coord_counts batch {logical_batch}"
+                                        )
+                                    if slat.shape[1] < count:
+                                        raise ValueError(
+                                            f"shape_slat tokens {slat.shape[1]} can't cover coord count {count} for batch {i}"
+                                        )
+                                    slat_feats = slat[i, :count].to(x_st_i.device)
+                                else:
+                                    slat_feats = slat[:count].to(x_st_i.device)
+                                x_st_i = x_st_i.replace(feats=torch.cat([x_st_i.feats, slat_feats], dim=-1))
+                                sparse_out = self.shape2txt(x_st_i, t_i, c_i)

-                                sparse_outs.append(sparse_out.feats)
-                                active_coord_counts.append(count)
+                            sparse_outs.append(sparse_out.feats)
+                            active_coord_counts.append(count)

                     out_channels = sparse_outs[0].shape[-1]
                     padded = sparse_outs[0].new_zeros((B, N, out_channels))
@@ -1022,7 +986,6 @@ class Trellis2(nn.Module):
             out = self.shape2txt(x_st, t_eval, c_eval)
         else: # structure
             orig_bsz = x.shape[0]
-            cond_or_uncond = transformer_options.get("cond_or_uncond") or []
             batch_groups = len(cond_or_uncond) if len(cond_or_uncond) > 0 and orig_bsz % len(cond_or_uncond) == 0 else 1
             logical_batch = orig_bsz // batch_groups
             if logical_batch > 1:
@@ -1034,23 +997,19 @@ class Trellis2(nn.Module):
                 c_groups = context.reshape(batch_groups, logical_batch, *context.shape[1:])

                 if shape_rule and batch_groups > 1:
-                    selected_group_indices = [batch_groups - 1]
+                    selected_group_indices = cond_group_indices(batch_groups)
                 else:
                     selected_group_indices = list(range(batch_groups))

                 out_groups = []
                 for sample_index in range(logical_batch):
                     if shape_rule and batch_groups > 1:
-                        half = orig_bsz // 2
-                        x_i = x[half + sample_index].unsqueeze(0)
+                        x_i = x_groups[selected_group_indices, sample_index]
                         if timestep.shape[0] > 1:
-                            t_i = timestep[half + sample_index].unsqueeze(0)
+                            t_i = t_groups[selected_group_indices, sample_index]
                         else:
                             t_i = timestep
-                        if cond.shape[0] > 1:
-                            c_i = cond[sample_index].unsqueeze(0)
-                        else:
-                            c_i = cond
+                        c_i = c_groups[selected_group_indices, sample_index]
                     else:
                         x_i = x_groups[selected_group_indices, sample_index]
                         if timestep.shape[0] > 1:
diff --git a/comfy/sample.py b/comfy/sample.py
index 7251aa799..6fba221ed 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -15,32 +15,26 @@ def prepare_noise_inner(latent_image, generator, noise_inds=None):
         else:
             noise_inds = np.asarray(noise_inds, dtype=np.int64)

+        base_seed = int(generator.initial_seed())
         unique_inds = np.unique(noise_inds)
-        first_indices = {int(unique_index): int(np.flatnonzero(noise_inds == unique_index)[0]) for unique_index in unique_inds.tolist()}
-        index_states = {}
-        for unique_index in sorted(first_indices):
-            index_states[unique_index] = generator.get_state().clone()
-            count = int(coord_counts[first_indices[unique_index]].item())
-            torch.randn(
-                [1, latent_image.size(1), count, latent_image.size(3)],
-                dtype=torch.float32,
-                layout=latent_image.layout,
-                generator=generator,
-                device="cpu",
-            )
-
-        for batch_index, noise_index in enumerate(noise_inds.tolist()):
-            count = int(coord_counts[batch_index].item())
+        sample_noises = {}
+        for noise_index in unique_inds.tolist():
+            rows = np.flatnonzero(noise_inds == noise_index)
+            max_count = max(int(coord_counts[row].item()) for row in rows.tolist())
             local_generator = torch.Generator(device="cpu")
-            local_generator.set_state(index_states[int(noise_index)].clone())
-            sample_noise = torch.randn(
-                [1, latent_image.size(1), count, latent_image.size(3)],
+            local_generator.manual_seed(base_seed + int(noise_index))
+            sample_noises[int(noise_index)] = torch.randn(
+                [1, latent_image.size(1), max_count, latent_image.size(3)],
                 dtype=torch.float32,
                 layout=latent_image.layout,
                 generator=local_generator,
                 device="cpu",
             )
-            noise[batch_index:batch_index + 1, :, :count, :] = sample_noise
+
+        for batch_index, noise_index in enumerate(noise_inds.tolist()):
+            count = int(coord_counts[batch_index].item())
+            sample_noise = sample_noises[int(noise_index)]
+            noise[batch_index:batch_index + 1, :, :count, :] = sample_noise[:, :, :count, :]
         return noise.to(dtype=latent_image.dtype)

     if noise_inds is None:
diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py
index 621cc9586..6556ed176 100644
--- a/comfy_extras/nodes_trellis2.py
+++ b/comfy_extras/nodes_trellis2.py
@@ -115,18 +115,54 @@ def infer_batched_coord_layout(coords):
     return batch_size, counts, max_tokens


+def split_batched_coords(coords, coord_counts):
+    batch_ids = coords[:, 0].to(torch.int64)
+    order = torch.argsort(batch_ids, stable=True)
+    sorted_coords = coords.index_select(0, order)
+    sorted_batch_ids = batch_ids.index_select(0, order)
+
+    offsets = coord_counts.cumsum(0) - coord_counts
+    items = []
+    for i in range(coord_counts.shape[0]):
+        count = int(coord_counts[i].item())
+        start = int(offsets[i].item())
+        coords_i = sorted_coords[start:start + count]
+        ids_i = sorted_batch_ids[start:start + count]
+        if coords_i.shape[0] != count or not torch.all(ids_i == i):
+            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
+        items.append(coords_i)
+    return items
+
+
+def normalize_batch_index(batch_index):
+    if batch_index is None:
+        return None
+    if isinstance(batch_index, int):
+        return [int(batch_index)]
+    return list(batch_index)
+
+
+def resolve_sample_indices(batch_index, batch_size):
+    sample_indices = normalize_batch_index(batch_index)
+    if sample_indices is None:
+        return list(range(batch_size))
+    if len(sample_indices) != batch_size:
+        raise ValueError(
+            f"Trellis2 batch_index length {len(sample_indices)} does not match batch size {batch_size}"
+        )
+    return sample_indices
+
+
 def flatten_batched_sparse_latent(samples, coords, coord_counts):
     samples = samples.squeeze(-1).transpose(1, 2)
     if coord_counts is None:
         return samples.reshape(-1, samples.shape[-1]), coords

+    coords_items = split_batched_coords(coords, coord_counts)
     feat_list = []
     coord_list = []
-    for i in range(coord_counts.shape[0]):
+    for i, coords_i in enumerate(coords_items):
         count = int(coord_counts[i].item())
-        coords_i = coords[coords[:, 0] == i]
-        if coords_i.shape[0] != count:
-            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
         feat_list.append(samples[i, :count])
         coord_list.append(coords_i)

     return torch.cat(feat_list, dim=0), torch.cat(coord_list, dim=0)
@@ -138,12 +174,10 @@ def split_batched_sparse_latent(samples, coords, coord_counts):
     if coord_counts is None:
         return [(samples.reshape(-1, samples.shape[-1]), coords)]

+    coords_items = split_batched_coords(coords, coord_counts)
     items = []
-    for i in range(coord_counts.shape[0]):
+    for i, coords_i in enumerate(coords_items):
         count = int(coord_counts[i].item())
-        coords_i = coords[coords[:, 0] == i]
-        if coords_i.shape[0] != count:
-            raise ValueError(f"Trellis2 coords rows for batch {i} expected {count}, got {coords_i.shape[0]}")
         items.append((samples[i, :count], coords_i))
     return items
@@ -345,6 +379,7 @@ class VaeDecodeStructureTrellis2(IO.ComfyNode):
         load_device = comfy.model_management.get_torch_device()
         offload_device = comfy.model_management.vae_offload_device()
         decoder = decoder.to(load_device)
+        batch_index = normalize_batch_index(samples.get("batch_index"))
         samples = samples["samples"]
         samples = samples.to(load_device)
         if samples.shape[0] > 1:
@@ -361,6 +396,8 @@ class VaeDecodeStructureTrellis2(IO.ComfyNode):
             ratio = current_res // resolution
             decoded = torch.nn.functional.max_pool3d(decoded.float(), ratio, ratio, 0) > 0.5
         out = Types.VOXEL(decoded.squeeze(1).float())
+        if batch_index is not None:
+            out.batch_index = normalize_batch_index(batch_index)
         return IO.NodeOutput(out)

 class Trellis2UpsampleCascade(IO.ComfyNode):
@@ -386,6 +423,7 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
         comfy.model_management.load_model_gpu(vae.patcher)

         coord_counts = shape_latent_512.get("coord_counts")
+        batch_index = normalize_batch_index(shape_latent_512.get("batch_index"))
         decoder = vae.first_stage_model.shape_dec
         lr_resolution = 512
         target_resolution = int(target_resolution)
@@ -424,40 +462,48 @@ class Trellis2UpsampleCascade(IO.ComfyNode):
         )
         decoder_dtype = next(decoder.parameters()).dtype

-        final_coords_list = []
-        output_resolutions = []
-        output_coord_counts = []
-        for batch_index, (feats_i, coords_i) in enumerate(items):
+        sample_hr_coords = []
+        for feats_i, coords_i in items:
             feats_i = feats_i.to(device)
             coords_i = coords_i.to(device).clone()
             coords_i[:, 0] = 0
             slat_i = shape_norm(feats_i, coords_i)
             slat_i.feats = slat_i.feats.to(decoder_dtype)
-            hr_coords_i = decoder.upsample(slat_i, upsample_times=4)
+            sample_hr_coords.append(decoder.upsample(slat_i, upsample_times=4))

-            hr_resolution = target_resolution
-            while True:
+        hr_resolution = target_resolution
+        while True:
+            exceeds_limit = False
+            for hr_coords_i in sample_hr_coords:
                 quant_coords_i = torch.cat([
                     hr_coords_i[:, :1],
                     ((hr_coords_i[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
                 ], dim=1)
-                final_coords_i = quant_coords_i.unique(dim=0)
-                num_tokens = final_coords_i.shape[0]
-
-                if num_tokens < max_tokens or hr_resolution <= 1024:
+                if quant_coords_i.unique(dim=0).shape[0] >= max_tokens:
+                    exceeds_limit = True
                     break
-                hr_resolution -= 128
+            if not exceeds_limit or hr_resolution <= 1024:
+                break
+            hr_resolution -= 128

+        final_coords_list = []
+        output_coord_counts = []
+        for sample_offset, hr_coords_i in enumerate(sample_hr_coords):
+            quant_coords_i = torch.cat([
+                hr_coords_i[:, :1],
+                ((hr_coords_i[:, 1:] + 0.5) / lr_resolution * (hr_resolution // 16)).int(),
+            ], dim=1)
+            final_coords_i = quant_coords_i.unique(dim=0)
             final_coords_i = final_coords_i.clone()
-            final_coords_i[:, 0] = batch_index
+            final_coords_i[:, 0] = sample_offset
             final_coords_list.append(final_coords_i)
             output_coord_counts.append(int(final_coords_i.shape[0]))

         return IO.NodeOutput({
             "coords":
torch.cat(final_coords_list, dim=0), "coord_counts": torch.tensor(output_coord_counts, dtype=torch.int64), - "resolutions": torch.tensor(output_resolutions, dtype=torch.int64), + "resolutions": torch.full((len(final_coords_list),), int(hr_resolution), dtype=torch.int64), + "batch_index": normalize_batch_index(batch_index), },) dino_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) @@ -612,7 +658,8 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): category="latent/3d", inputs=[ IO.AnyType.Input("structure_or_coords"), - IO.Model.Input("model") + IO.Model.Input("model"), + IO.Int.Input("seed", default=0, min=0, max=0xffffffffffffffff), ], outputs=[ IO.Latent.Output(), @@ -621,21 +668,24 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): ) @classmethod - def execute(cls, structure_or_coords, model): + def execute(cls, structure_or_coords, model, seed): # to accept the upscaled coords is_512_pass = False coord_counts = None coord_resolutions = None + batch_index = None if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4: decoded = structure_or_coords.data.unsqueeze(1) coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int() is_512_pass = True + batch_index = normalize_batch_index(getattr(structure_or_coords, "batch_index", None)) elif isinstance(structure_or_coords, dict): coords = structure_or_coords["coords"].int() coord_counts = structure_or_coords.get("coord_counts") coord_resolutions = structure_or_coords.get("resolutions") + batch_index = normalize_batch_index(structure_or_coords.get("batch_index")) is_512_pass = False elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2: @@ -655,15 +705,17 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): else: coord_counts = inferred_coord_counts if batch_size == 1: - coord_counts = None - latent = torch.randn(1, in_channels, coords.shape[0], 1) + sample_indices = normalize_batch_index(batch_index) or [0] + generator = torch.Generator(device="cpu") + generator.manual_seed(int(seed) + int(sample_indices[0])) + latent = torch.randn(1, in_channels, coords.shape[0], 1, generator=generator) else: + sample_indices = resolve_sample_indices(batch_index, batch_size) latent = torch.zeros(batch_size, in_channels, max_tokens, 1) - base_state = torch.random.get_rng_state() - for i in range(batch_size): + for i, sample_index in enumerate(sample_indices): count = int(coord_counts[i].item()) generator = torch.Generator(device="cpu") - generator.set_state(base_state.clone()) + generator.manual_seed(int(seed) + int(sample_index)) latent_i = torch.randn(1, in_channels, count, 1, generator=generator) latent[i, :, :count] = latent_i[0] if coord_counts is not None: @@ -685,11 +737,12 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): else: model.model_options["transformer_options"]["generation_mode"] = "shape_generation" output = {"samples": latent, "coords": coords, "type": "trellis2"} + if batch_index is not None: + output["batch_index"] = normalize_batch_index(batch_index) if coord_counts is not None: output["coord_counts"] = coord_counts if coord_resolutions is not None: output["coord_resolutions"] = coord_resolutions - output["batch_index"] = [0] * batch_size return IO.NodeOutput(output, model) class EmptyTextureLatentTrellis2(IO.ComfyNode): @@ -701,7 +754,8 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): inputs=[ IO.Voxel.Input("structure_or_coords"), IO.Latent.Input("shape_latent"), - IO.Model.Input("model") + IO.Model.Input("model"), + IO.Int.Input("seed", default=0, min=0, 
max=0xffffffffffffffff), ], outputs=[ IO.Latent.Output(), @@ -710,20 +764,24 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): ) @classmethod - def execute(cls, structure_or_coords, shape_latent, model): + def execute(cls, structure_or_coords, shape_latent, model, seed): channels = 32 coord_counts = None + batch_index = None if hasattr(structure_or_coords, "data") and structure_or_coords.data.ndim == 4: decoded = structure_or_coords.data.unsqueeze(1) coords = torch.argwhere(decoded.bool())[:, [0, 2, 3, 4]].int() + batch_index = normalize_batch_index(getattr(structure_or_coords, "batch_index", None)) elif isinstance(structure_or_coords, dict): coords = structure_or_coords["coords"].int() coord_counts = structure_or_coords.get("coord_counts") + batch_index = normalize_batch_index(structure_or_coords.get("batch_index")) elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2: coords = structure_or_coords.int() + shape_batch_index = normalize_batch_index(shape_latent.get("batch_index")) shape_latent = shape_latent["samples"] batch_size, inferred_coord_counts, max_tokens = infer_batched_coord_layout(coords) if coord_counts is not None: @@ -746,19 +804,23 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): ) if batch_size == 1: - coord_counts = None - latent = torch.randn(1, channels, coords.shape[0], 1) + sample_indices = normalize_batch_index(batch_index) or [0] + generator = torch.Generator(device="cpu") + generator.manual_seed(int(seed) + int(sample_indices[0])) + latent = torch.randn(1, channels, coords.shape[0], 1, generator=generator) else: + sample_indices = resolve_sample_indices(batch_index, batch_size) latent = torch.zeros(batch_size, channels, max_tokens, 1) - base_state = torch.random.get_rng_state() - for i in range(batch_size): + for i, sample_index in enumerate(sample_indices): count = int(coord_counts[i].item()) generator = torch.Generator(device="cpu") - generator.set_state(base_state.clone()) + generator.manual_seed(int(seed) + int(sample_index)) latent_i = torch.randn(1, channels, count, 1, generator=generator) latent[i, :, :count] = latent_i[0] if coord_counts is not None: latent.trellis_coord_counts = coord_counts.clone() + if batch_index is None: + batch_index = shape_batch_index model = model.clone() model.model_options = model.model_options.copy() if "transformer_options" in model.model_options: @@ -772,9 +834,10 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): model.model_options["transformer_options"]["generation_mode"] = "texture_generation" model.model_options["transformer_options"]["shape_slat"] = shape_latent output = {"samples": latent, "coords": coords, "type": "trellis2"} + if batch_index is not None: + output["batch_index"] = normalize_batch_index(batch_index) if coord_counts is not None: output["coord_counts"] = coord_counts - output["batch_index"] = [0] * batch_size return IO.NodeOutput(output, model) @@ -786,19 +849,29 @@ class EmptyStructureLatentTrellis2(IO.ComfyNode): category="latent/3d", inputs=[ IO.Int.Input("batch_size", default=1, min=1, max=4096, tooltip="The number of latent images in the batch."), + IO.Int.Input("batch_index_start", default=0, min=0, max=4096, tooltip="Starting sample index for per-sample sampler noise."), + IO.Int.Input("seed", default=0, min=0, max=0xffffffffffffffff), ], outputs=[ IO.Latent.Output(), ] ) @classmethod - def execute(cls, batch_size): + def execute(cls, batch_size, batch_index_start, seed): in_channels = 8 resolution = 16 - latent = torch.randn(1, in_channels, resolution, resolution, 
resolution).repeat(batch_size, 1, 1, 1, 1) - output = {"samples": latent, "type": "trellis2"} - if batch_size > 1: - output["batch_index"] = [0] * batch_size + sample_indices = [int(batch_index_start) + i for i in range(batch_size)] + latent = torch.zeros(batch_size, in_channels, resolution, resolution, resolution) + for i, sample_index in enumerate(sample_indices): + generator = torch.Generator(device="cpu") + generator.manual_seed(int(seed) + sample_index) + latent[i] = torch.randn(1, in_channels, resolution, resolution, resolution, generator=generator)[0] + output = { + "samples": latent, + "type": "trellis2", + } + if batch_size > 1 or batch_index_start != 0: + output["batch_index"] = sample_indices return IO.NodeOutput(output) def simplify_fn(vertices, faces, colors=None, target=100000): diff --git a/tests-unit/comfy_extras_test/nodes_trellis2_test.py b/tests-unit/comfy_extras_test/nodes_trellis2_test.py index 920eca471..95f64d031 100644 --- a/tests-unit/comfy_extras_test/nodes_trellis2_test.py +++ b/tests-unit/comfy_extras_test/nodes_trellis2_test.py @@ -123,5 +123,88 @@ class TestRunConditioningRestore(unittest.TestCase): self.assertFalse(hasattr(inner_model, "image_size")) +class DummyCloneModel: + def __init__(self): + self.model_options = {} + + def clone(self): + cloned = DummyCloneModel() + cloned.model_options = self.model_options.copy() + return cloned + + +class TestTrellisBatchSemantics(unittest.TestCase): + def test_empty_structure_latent_is_deterministic_and_propagates_sample_indices(self): + batch_output = nodes_trellis2.EmptyStructureLatentTrellis2.execute(2, 0, 17)[0] + single_output = nodes_trellis2.EmptyStructureLatentTrellis2.execute(1, 5, 17)[0] + + expected_batch = torch.zeros(2, 8, 16, 16, 16) + expected_batch[0] = torch.randn(1, 8, 16, 16, 16, generator=torch.Generator(device="cpu").manual_seed(17))[0] + expected_batch[1] = torch.randn(1, 8, 16, 16, 16, generator=torch.Generator(device="cpu").manual_seed(18))[0] + expected_single = torch.randn(1, 8, 16, 16, 16, generator=torch.Generator(device="cpu").manual_seed(22)) + + self.assertTrue(torch.equal(batch_output["samples"], expected_batch)) + self.assertEqual(batch_output["batch_index"], [0, 1]) + self.assertTrue(torch.equal(single_output["samples"], expected_single)) + self.assertEqual(single_output["batch_index"], [5]) + + def test_empty_shape_latent_is_deterministic_and_propagates_batch_index(self): + coords = torch.tensor( + [ + [1, 5, 5, 5], + [0, 1, 1, 1], + [1, 6, 6, 6], + [0, 2, 2, 2], + [1, 7, 7, 7], + ], + dtype=torch.int32, + ) + structure = { + "coords": coords, + "coord_counts": torch.tensor([2, 3], dtype=torch.int64), + "batch_index": [4, 9], + } + + output, _ = nodes_trellis2.EmptyShapeLatentTrellis2.execute(structure, DummyCloneModel(), 23) + + expected = torch.zeros(2, 32, 3, 1) + expected[0, :, :2, :] = torch.randn(1, 32, 2, 1, generator=torch.Generator(device="cpu").manual_seed(27))[0] + expected[1, :, :3, :] = torch.randn(1, 32, 3, 1, generator=torch.Generator(device="cpu").manual_seed(32))[0] + + self.assertTrue(torch.equal(output["samples"], expected)) + self.assertTrue(torch.equal(output["coord_counts"], torch.tensor([2, 3], dtype=torch.int64))) + self.assertEqual(output["batch_index"], [4, 9]) + + def test_empty_shape_latent_keeps_singleton_coord_counts(self): + structure = { + "coords": torch.tensor( + [ + [0, 1, 1, 1], + [0, 2, 2, 2], + ], + dtype=torch.int32, + ), + } + + output, _ = nodes_trellis2.EmptyShapeLatentTrellis2.execute(structure, DummyCloneModel(), 11) + + 
self.assertTrue(torch.equal(output["coord_counts"], torch.tensor([2], dtype=torch.int64))) + + def test_flatten_batched_sparse_latent_validates_coord_counts(self): + samples = torch.zeros(2, 32, 3, 1) + coords = torch.tensor( + [ + [0, 1, 1, 1], + [1, 2, 2, 2], + [1, 3, 3, 3], + ], + dtype=torch.int32, + ) + coord_counts = torch.tensor([2, 1], dtype=torch.int64) + + with self.assertRaises(ValueError): + nodes_trellis2.flatten_batched_sparse_latent(samples, coords, coord_counts) + + if __name__ == "__main__": unittest.main() diff --git a/tests-unit/comfy_test/sample_test.py b/tests-unit/comfy_test/sample_test.py new file mode 100644 index 000000000..ad154aca8 --- /dev/null +++ b/tests-unit/comfy_test/sample_test.py @@ -0,0 +1,47 @@ +import unittest + +import torch + +import comfy.sample + + +class TestPrepareNoiseInnerTrellis(unittest.TestCase): + def test_coord_counts_noise_matches_per_index_prefix_draws(self): + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([3, 5], dtype=torch.int64) + + generator = torch.Generator(device="cpu") + generator.manual_seed(123) + noise = comfy.sample.prepare_noise_inner(latent, generator) + + expected = torch.zeros_like(noise, dtype=torch.float32) + row0 = torch.Generator(device="cpu") + row0.manual_seed(123) + expected[0, :, :3, :] = torch.randn(1, 4, 3, 1, generator=row0)[0] + row1 = torch.Generator(device="cpu") + row1.manual_seed(124) + expected[1] = torch.randn(1, 4, 5, 1, generator=row1)[0] + + self.assertTrue(torch.equal(noise.float(), expected)) + self.assertTrue(torch.equal(noise[0, :, 3:, :], torch.zeros_like(noise[0, :, 3:, :]))) + + def test_coord_counts_noise_inds_share_prefixes_for_duplicates(self): + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([3, 5], dtype=torch.int64) + + generator = torch.Generator(device="cpu") + generator.manual_seed(456) + noise = comfy.sample.prepare_noise_inner(latent, generator, noise_inds=[7, 7]) + + replay = torch.Generator(device="cpu") + replay.manual_seed(463) + expected1 = torch.randn(1, 4, 5, 1, generator=replay) + expected0 = expected1[:, :, :3, :] + + self.assertTrue(torch.equal(noise[0:1, :, :3, :], expected0)) + self.assertTrue(torch.equal(noise[1:2, :, :5, :], expected1)) + self.assertTrue(torch.equal(noise[0, :, 3:, :], torch.zeros_like(noise[0, :, 3:, :]))) + + +if __name__ == "__main__": + unittest.main() From a752dd473642020b66b4171600154b4435d20638 Mon Sep 17 00:00:00 2001 From: John Pollock Date: Mon, 20 Apr 2026 14:46:23 -0500 Subject: [PATCH 4/8] Harden Trellis sparse metadata validation --- comfy/ldm/trellis2/model.py | 4 + comfy_extras/nodes_trellis2.py | 34 ++++++-- .../comfy_extras_test/nodes_trellis2_test.py | 77 +++++++++++++++++++ 3 files changed, 108 insertions(+), 7 deletions(-) diff --git a/comfy/ldm/trellis2/model.py b/comfy/ldm/trellis2/model.py index 15939e5c6..7cf3e728e 100644 --- a/comfy/ldm/trellis2/model.py +++ b/comfy/ldm/trellis2/model.py @@ -853,6 +853,10 @@ class Trellis2(nn.Module): raise ValueError( f"Trellis2 coord_counts batch {logical_batch} doesn't divide latent batch {B}" ) + if int(coord_counts.sum().item()) != coords.shape[0]: + raise ValueError( + f"Trellis2 coord_counts total {int(coord_counts.sum().item())} does not match coords rows {coords.shape[0]}" + ) batch_ids = coords[:, 0].to(torch.int64) order = torch.argsort(batch_ids, stable=True) sorted_coords = coords.index_select(0, order) diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py index 6556ed176..ce184a946 100644 --- 
a/comfy_extras/nodes_trellis2.py +++ b/comfy_extras/nodes_trellis2.py @@ -105,6 +105,8 @@ def infer_batched_coord_layout(coords): raise ValueError("Trellis2 coords can't be empty") batch_ids = coords[:, 0].to(torch.int64) + if (batch_ids < 0).any(): + raise ValueError(f"Trellis2 batch ids must be non-negative, got {batch_ids.unique(sorted=True).tolist()}") batch_size = int(batch_ids.max().item()) + 1 counts = torch.bincount(batch_ids, minlength=batch_size) @@ -116,6 +118,15 @@ def infer_batched_coord_layout(coords): def split_batched_coords(coords, coord_counts): + if coord_counts.ndim != 1: + raise ValueError(f"Trellis2 coord_counts must be 1D, got shape {tuple(coord_counts.shape)}") + if (coord_counts < 0).any(): + raise ValueError(f"Trellis2 coord_counts must be non-negative, got {coord_counts.tolist()}") + if int(coord_counts.sum().item()) != coords.shape[0]: + raise ValueError( + f"Trellis2 coord_counts total {int(coord_counts.sum().item())} does not match coords rows {coords.shape[0]}" + ) + batch_ids = coords[:, 0].to(torch.int64) order = torch.argsort(batch_ids, stable=True) sorted_coords = coords.index_select(0, order) @@ -153,6 +164,17 @@ def resolve_sample_indices(batch_index, batch_size): return sample_indices +def resolve_singleton_sample_index(batch_index): + sample_indices = normalize_batch_index(batch_index) + if sample_indices is None: + return 0 + if len(sample_indices) != 1: + raise ValueError( + f"Trellis2 batch_index must be an int or single-element iterable for singleton coords, got {sample_indices}" + ) + return int(sample_indices[0]) + + def flatten_batched_sparse_latent(samples, coords, coord_counts): samples = samples.squeeze(-1).transpose(1, 2) if coord_counts is None: @@ -705,9 +727,9 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): else: coord_counts = inferred_coord_counts if batch_size == 1: - sample_indices = normalize_batch_index(batch_index) or [0] + sample_index = resolve_singleton_sample_index(batch_index) generator = torch.Generator(device="cpu") - generator.manual_seed(int(seed) + int(sample_indices[0])) + generator.manual_seed(int(seed) + sample_index) latent = torch.randn(1, in_channels, coords.shape[0], 1, generator=generator) else: sample_indices = resolve_sample_indices(batch_index, batch_size) @@ -730,8 +752,6 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): model.model_options["transformer_options"]["coords"] = coords if coord_counts is not None: model.model_options["transformer_options"]["coord_counts"] = coord_counts - if coord_resolutions is not None: - model.model_options["transformer_options"]["coord_resolutions"] = coord_resolutions if is_512_pass: model.model_options["transformer_options"]["generation_mode"] = "shape_generation_512" else: @@ -742,7 +762,7 @@ class EmptyShapeLatentTrellis2(IO.ComfyNode): if coord_counts is not None: output["coord_counts"] = coord_counts if coord_resolutions is not None: - output["coord_resolutions"] = coord_resolutions + output["resolutions"] = coord_resolutions return IO.NodeOutput(output, model) class EmptyTextureLatentTrellis2(IO.ComfyNode): @@ -804,9 +824,9 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): ) if batch_size == 1: - sample_indices = normalize_batch_index(batch_index) or [0] + sample_index = resolve_singleton_sample_index(batch_index) generator = torch.Generator(device="cpu") - generator.manual_seed(int(seed) + int(sample_indices[0])) + generator.manual_seed(int(seed) + sample_index) latent = torch.randn(1, channels, coords.shape[0], 1, generator=generator) else: sample_indices = 
resolve_sample_indices(batch_index, batch_size) diff --git a/tests-unit/comfy_extras_test/nodes_trellis2_test.py b/tests-unit/comfy_extras_test/nodes_trellis2_test.py index 95f64d031..196a88343 100644 --- a/tests-unit/comfy_extras_test/nodes_trellis2_test.py +++ b/tests-unit/comfy_extras_test/nodes_trellis2_test.py @@ -190,6 +190,40 @@ class TestTrellisBatchSemantics(unittest.TestCase): self.assertTrue(torch.equal(output["coord_counts"], torch.tensor([2], dtype=torch.int64))) + def test_empty_shape_latent_rejects_multi_index_singleton(self): + structure = { + "coords": torch.tensor( + [ + [0, 1, 1, 1], + [0, 2, 2, 2], + ], + dtype=torch.int32, + ), + "batch_index": [5, 6], + } + + with self.assertRaises(ValueError): + nodes_trellis2.EmptyShapeLatentTrellis2.execute(structure, DummyCloneModel(), 11) + + def test_empty_texture_latent_rejects_multi_index_singleton(self): + coords = torch.tensor( + [ + [0, 1, 1, 1], + [0, 2, 2, 2], + ], + dtype=torch.int32, + ) + structure = {"coords": coords, "batch_index": [7, 8]} + shape_latent = {"samples": torch.zeros(1, 32, 2, 1)} + + with self.assertRaises(ValueError): + nodes_trellis2.EmptyTextureLatentTrellis2.execute( + structure, + shape_latent, + DummyCloneModel(), + 13, + ) + def test_flatten_batched_sparse_latent_validates_coord_counts(self): samples = torch.zeros(2, 32, 3, 1) coords = torch.tensor( @@ -205,6 +239,49 @@ class TestTrellisBatchSemantics(unittest.TestCase): with self.assertRaises(ValueError): nodes_trellis2.flatten_batched_sparse_latent(samples, coords, coord_counts) + def test_infer_batched_coord_layout_rejects_negative_batch_ids(self): + coords = torch.tensor( + [ + [-1, 1, 1, 1], + [0, 2, 2, 2], + ], + dtype=torch.int32, + ) + + with self.assertRaises(ValueError): + nodes_trellis2.infer_batched_coord_layout(coords) + + def test_split_batched_coords_validates_total_count(self): + coords = torch.tensor( + [ + [0, 1, 1, 1], + [1, 2, 2, 2], + [1, 3, 3, 3], + ], + dtype=torch.int32, + ) + coord_counts = torch.tensor([1, 1], dtype=torch.int64) + + with self.assertRaises(ValueError): + nodes_trellis2.split_batched_coords(coords, coord_counts) + + def test_empty_shape_latent_preserves_resolutions_key(self): + structure = { + "coords": torch.tensor( + [ + [0, 1, 1, 1], + [0, 2, 2, 2], + ], + dtype=torch.int32, + ), + "resolutions": torch.tensor([1024], dtype=torch.int64), + } + + output, model = nodes_trellis2.EmptyShapeLatentTrellis2.execute(structure, DummyCloneModel(), 11) + + self.assertTrue(torch.equal(output["resolutions"], torch.tensor([1024], dtype=torch.int64))) + self.assertNotIn("coord_resolutions", model.model_options["transformer_options"]) + if __name__ == "__main__": unittest.main() From 0b99c8c44acf964b9989b71439826d2582363238 Mon Sep 17 00:00:00 2001 From: John Pollock Date: Mon, 20 Apr 2026 15:50:40 -0500 Subject: [PATCH 5/8] Fail loud on Trellis invalid batch metadata --- comfy/sample.py | 4 ++++ comfy_extras/nodes_trellis2.py | 5 +++++ tests-unit/comfy_extras_test/nodes_trellis2_test.py | 9 +++++++++ tests-unit/comfy_test/sample_test.py | 10 ++++++++++ 4 files changed, 28 insertions(+) diff --git a/comfy/sample.py b/comfy/sample.py index 6fba221ed..8626269a1 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -14,6 +14,10 @@ def prepare_noise_inner(latent_image, generator, noise_inds=None): noise_inds = np.arange(latent_image.size(0), dtype=np.int64) else: noise_inds = np.asarray(noise_inds, dtype=np.int64) + if noise_inds.shape[0] != latent_image.size(0): + raise ValueError( + f"Trellis2 noise_inds length 
{noise_inds.shape[0]} does not match latent batch {latent_image.size(0)}" + ) base_seed = int(generator.initial_seed()) unique_inds = np.unique(noise_inds) diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py index ce184a946..328cec6e7 100644 --- a/comfy_extras/nodes_trellis2.py +++ b/comfy_extras/nodes_trellis2.py @@ -800,6 +800,11 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): elif isinstance(structure_or_coords, torch.Tensor) and structure_or_coords.ndim == 2: coords = structure_or_coords.int() + else: + raise ValueError( + "structure_or_coords must be a voxel input with data.ndim == 4, " + f'a dict containing "coords", or a 2D torch.Tensor; got {type(structure_or_coords).__name__}' + ) shape_batch_index = normalize_batch_index(shape_latent.get("batch_index")) shape_latent = shape_latent["samples"] diff --git a/tests-unit/comfy_extras_test/nodes_trellis2_test.py b/tests-unit/comfy_extras_test/nodes_trellis2_test.py index 196a88343..43647e793 100644 --- a/tests-unit/comfy_extras_test/nodes_trellis2_test.py +++ b/tests-unit/comfy_extras_test/nodes_trellis2_test.py @@ -224,6 +224,15 @@ class TestTrellisBatchSemantics(unittest.TestCase): 13, ) + def test_empty_texture_latent_rejects_invalid_structure_input(self): + with self.assertRaises(ValueError): + nodes_trellis2.EmptyTextureLatentTrellis2.execute( + "bad-input", + {"samples": torch.zeros(1, 32, 2, 1)}, + DummyCloneModel(), + 13, + ) + def test_flatten_batched_sparse_latent_validates_coord_counts(self): samples = torch.zeros(2, 32, 3, 1) coords = torch.tensor( diff --git a/tests-unit/comfy_test/sample_test.py b/tests-unit/comfy_test/sample_test.py index ad154aca8..e76e65266 100644 --- a/tests-unit/comfy_test/sample_test.py +++ b/tests-unit/comfy_test/sample_test.py @@ -42,6 +42,16 @@ class TestPrepareNoiseInnerTrellis(unittest.TestCase): self.assertTrue(torch.equal(noise[1:2, :, :5, :], expected1)) self.assertTrue(torch.equal(noise[0, :, 3:, :], torch.zeros_like(noise[0, :, 3:, :]))) + def test_coord_counts_noise_inds_length_must_match_batch(self): + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([3, 5], dtype=torch.int64) + + generator = torch.Generator(device="cpu") + generator.manual_seed(456) + + with self.assertRaises(ValueError): + comfy.sample.prepare_noise_inner(latent, generator, noise_inds=[7]) + if __name__ == "__main__": unittest.main() From 90ebb50f00bf89ed8c947a0e4ed4ed0803981ea1 Mon Sep 17 00:00:00 2001 From: John Pollock Date: Mon, 20 Apr 2026 16:05:10 -0500 Subject: [PATCH 6/8] Harden Trellis sparse latent seeding --- comfy/ldm/trellis2/model.py | 4 +++ comfy/sample.py | 2 -- comfy_extras/nodes_trellis2.py | 4 +-- .../comfy_extras_test/nodes_trellis2_test.py | 29 +++++++++++++++++++ 4 files changed, 35 insertions(+), 4 deletions(-) diff --git a/comfy/ldm/trellis2/model.py b/comfy/ldm/trellis2/model.py index 7cf3e728e..e8ed39aed 100644 --- a/comfy/ldm/trellis2/model.py +++ b/comfy/ldm/trellis2/model.py @@ -880,6 +880,10 @@ class Trellis2(nn.Module): for i in range(logical_batch): out_index = rep * logical_batch + i count = int(coord_counts[i].item()) + if count > N: + raise ValueError( + f"Trellis2 coord count {count} exceeds latent token dimension {N} for batch {i}" + ) coords_i = coords_by_batch[i].clone() coords_i[:, 0] = 0 feats_i = x_eval[out_index, :count].clone() diff --git a/comfy/sample.py b/comfy/sample.py index 8626269a1..a4ce5f56f 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -74,8 +74,6 @@ def prepare_noise(latent_image, seed, 
noise_inds=None): def fix_empty_latent_channels(model, latent_image, downscale_ratio_spacial=None): if latent_image.is_nested: return latent_image - if getattr(latent_image, "trellis_skip_empty_fix", False): - return latent_image latent_format = model.get_model_object("latent_format") #Resize the empty latent image so it has the right number of channels if torch.count_nonzero(latent_image) == 0: if latent_format.latent_channels != latent_image.shape[1]: diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py index 328cec6e7..d345641b1 100644 --- a/comfy_extras/nodes_trellis2.py +++ b/comfy_extras/nodes_trellis2.py @@ -807,6 +807,8 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): ) shape_batch_index = normalize_batch_index(shape_latent.get("batch_index")) + if batch_index is None: + batch_index = shape_batch_index shape_latent = shape_latent["samples"] batch_size, inferred_coord_counts, max_tokens = infer_batched_coord_layout(coords) if coord_counts is not None: @@ -844,8 +846,6 @@ class EmptyTextureLatentTrellis2(IO.ComfyNode): latent[i, :, :count] = latent_i[0] if coord_counts is not None: latent.trellis_coord_counts = coord_counts.clone() - if batch_index is None: - batch_index = shape_batch_index model = model.clone() model.model_options = model.model_options.copy() if "transformer_options" in model.model_options: diff --git a/tests-unit/comfy_extras_test/nodes_trellis2_test.py b/tests-unit/comfy_extras_test/nodes_trellis2_test.py index 43647e793..49e872bc7 100644 --- a/tests-unit/comfy_extras_test/nodes_trellis2_test.py +++ b/tests-unit/comfy_extras_test/nodes_trellis2_test.py @@ -233,6 +233,35 @@ class TestTrellisBatchSemantics(unittest.TestCase): 13, ) + def test_empty_texture_latent_uses_shape_batch_index_for_seed_fallback(self): + coords = torch.tensor( + [ + [0, 1, 1, 1], + [1, 2, 2, 2], + [1, 3, 3, 3], + ], + dtype=torch.int32, + ) + structure = {"coords": coords} + shape_latent = { + "samples": torch.zeros(2, 32, 2, 1), + "batch_index": [4, 9], + } + + output, _ = nodes_trellis2.EmptyTextureLatentTrellis2.execute( + structure, + shape_latent, + DummyCloneModel(), + 13, + ) + + expected = torch.zeros(2, 32, 2, 1) + expected[0, :, :1, :] = torch.randn(1, 32, 1, 1, generator=torch.Generator(device="cpu").manual_seed(17))[0] + expected[1, :, :2, :] = torch.randn(1, 32, 2, 1, generator=torch.Generator(device="cpu").manual_seed(22))[0] + + self.assertTrue(torch.equal(output["samples"], expected)) + self.assertEqual(output["batch_index"], [4, 9]) + def test_flatten_batched_sparse_latent_validates_coord_counts(self): samples = torch.zeros(2, 32, 3, 1) coords = torch.tensor( From 33caec301a6f1a6ab4e802555e80a0e0c5e5c83c Mon Sep 17 00:00:00 2001 From: John Pollock Date: Mon, 20 Apr 2026 16:36:48 -0500 Subject: [PATCH 7/8] Validate Trellis coord_counts noise metadata --- comfy/sample.py | 10 ++++++++++ tests-unit/comfy_test/sample_test.py | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/comfy/sample.py b/comfy/sample.py index a4ce5f56f..878c4e984 100644 --- a/comfy/sample.py +++ b/comfy/sample.py @@ -9,6 +9,16 @@ import comfy.nested_tensor def prepare_noise_inner(latent_image, generator, noise_inds=None): coord_counts = getattr(latent_image, "trellis_coord_counts", None) if coord_counts is not None: + if coord_counts.ndim != 1: + raise ValueError(f"Trellis2 coord_counts must be 1D, got shape {tuple(coord_counts.shape)}") + if coord_counts.shape[0] != latent_image.size(0): + raise ValueError( + f"Trellis2 coord_counts length 
{coord_counts.shape[0]} does not match latent batch {latent_image.size(0)}" + ) + if (coord_counts < 0).any() or (coord_counts > latent_image.size(2)).any(): + raise ValueError( + f"Trellis2 coord_counts must be within [0, {latent_image.size(2)}], got {coord_counts.tolist()}" + ) noise = torch.zeros(latent_image.size(), dtype=torch.float32, layout=latent_image.layout, device="cpu") if noise_inds is None: noise_inds = np.arange(latent_image.size(0), dtype=np.int64) diff --git a/tests-unit/comfy_test/sample_test.py b/tests-unit/comfy_test/sample_test.py index e76e65266..227659994 100644 --- a/tests-unit/comfy_test/sample_test.py +++ b/tests-unit/comfy_test/sample_test.py @@ -52,6 +52,25 @@ class TestPrepareNoiseInnerTrellis(unittest.TestCase): with self.assertRaises(ValueError): comfy.sample.prepare_noise_inner(latent, generator, noise_inds=[7]) + def test_coord_counts_metadata_must_match_batch_and_bounds(self): + generator = torch.Generator(device="cpu") + generator.manual_seed(456) + + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([[3, 5]], dtype=torch.int64) + with self.assertRaises(ValueError): + comfy.sample.prepare_noise_inner(latent, generator) + + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([3], dtype=torch.int64) + with self.assertRaises(ValueError): + comfy.sample.prepare_noise_inner(latent, generator) + + latent = torch.zeros(2, 4, 5, 1) + latent.trellis_coord_counts = torch.tensor([3, 6], dtype=torch.int64) + with self.assertRaises(ValueError): + comfy.sample.prepare_noise_inner(latent, generator) + if __name__ == "__main__": unittest.main() From 939ac7ebb40f486820cc39a47ff0d3c28b3c44b2 Mon Sep 17 00:00:00 2001 From: John Pollock Date: Mon, 20 Apr 2026 17:20:57 -0500 Subject: [PATCH 8/8] Omit null batch_index from Trellis upsample output --- comfy_extras/nodes_trellis2.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/comfy_extras/nodes_trellis2.py b/comfy_extras/nodes_trellis2.py index d345641b1..56ec3e736 100644 --- a/comfy_extras/nodes_trellis2.py +++ b/comfy_extras/nodes_trellis2.py @@ -521,12 +521,16 @@ class Trellis2UpsampleCascade(IO.ComfyNode): final_coords_list.append(final_coords_i) output_coord_counts.append(int(final_coords_i.shape[0])) - return IO.NodeOutput({ + normalized_batch_index = normalize_batch_index(batch_index) + output = { "coords": torch.cat(final_coords_list, dim=0), "coord_counts": torch.tensor(output_coord_counts, dtype=torch.int64), "resolutions": torch.full((len(final_coords_list),), int(hr_resolution), dtype=torch.int64), - "batch_index": normalize_batch_index(batch_index), - },) + } + if normalized_batch_index is not None: + output["batch_index"] = normalized_batch_index + + return IO.NodeOutput(output,) dino_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) dino_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
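
Taken together, the series pins down one noise contract for Trellis2 sparse latents: every logical sample carries a batch_index, its noise comes from a fresh CPU generator seeded with seed + batch_index, and padded rows past that sample's coord count stay zero. The sketch below is illustrative only — make_padded_noise is a hypothetical helper, not an API in comfy.sample; it mirrors the per-sample seeding used by the Empty*Latent nodes above rather than reproducing prepare_noise_inner byte for byte.

    import torch

    def make_padded_noise(seed, batch_index, coord_counts, channels, max_tokens):
        # One CPU generator per logical sample, keyed by seed + batch_index, so
        # the draw for a sample does not depend on its row position in the batch.
        noise = torch.zeros(len(batch_index), channels, max_tokens, 1)
        for row, (sample_index, count) in enumerate(zip(batch_index, coord_counts)):
            generator = torch.Generator(device="cpu")
            generator.manual_seed(int(seed) + int(sample_index))
            # Draw only the active tokens; rows past `count` stay zero, matching
            # the padded sparse-latent layout described by coord_counts.
            noise[row, :, :count] = torch.randn(channels, count, 1, generator=generator)
        return noise

    # A sample keeps its noise when re-batched: row 0 of a [4, 9] batch equals
    # the singleton draw for batch_index 4, and padding stays zero.
    batched = make_padded_noise(13, [4, 9], [2, 3], channels=32, max_tokens=3)
    single = make_padded_noise(13, [4], [2], channels=32, max_tokens=2)
    assert torch.equal(batched[0, :, :2], single[0])
    assert torch.equal(batched[0, :, 2:], torch.zeros(32, 1, 1))

Keying the generator on seed + batch_index rather than on row position is what lets the duplicate-noise_inds test share prefixes across samples and lets a sample's latent survive batch re-slicing unchanged.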