diff --git a/comfy/ldm/chroma/layers_dct.py b/comfy/ldm/chroma/layers_dct.py
index 0f8b81b43..24c1aa7ee 100644
--- a/comfy/ldm/chroma/layers_dct.py
+++ b/comfy/ldm/chroma/layers_dct.py
@@ -6,6 +6,7 @@ from torch import nn
 
 from comfy.ldm.flux.layers import RMSNorm
 
+
 class NerfEmbedder(nn.Module):
     """
     An embedder module that combines input features with a 2D positional
@@ -130,6 +131,7 @@ class NerfEmbedder(nn.Module):
         # No-op if already the same dtype.
         return inputs.to(dtype=orig_dtype)
 
+
 class NerfGLUBlock(nn.Module):
     """
     A NerfBlock using a Gated Linear Unit (GLU) like MLP.
@@ -182,6 +184,7 @@ class NerfFinalLayer(nn.Module):
         # So we temporarily move the channel dimension to the end for the norm operation.
         return self.linear(self.norm(x.movedim(1, -1))).movedim(-1, 1)
 
+
 class NerfFinalLayerConv(nn.Module):
     def __init__(self, hidden_size, out_channels, dtype=None, device=None, operations=None):
         super().__init__()
diff --git a/comfy/ldm/chroma/model_dct.py b/comfy/ldm/chroma/model_dct.py
index e64bbba2f..1c5e0bb3a 100644
--- a/comfy/ldm/chroma/model_dct.py
+++ b/comfy/ldm/chroma/model_dct.py
@@ -10,10 +10,7 @@ from torch import Tensor, nn
 from einops import repeat
 
 import comfy.ldm.common_dit
-from comfy.ldm.flux.layers import (
-    EmbedND,
-    timestep_embedding,
-)
+from comfy.ldm.flux.layers import EmbedND
 
 from .layers import (
     DoubleStreamBlock,
@@ -29,6 +26,7 @@ from .layers_dct import (
 
 from . import model as chroma_model
 
+
 @dataclass
 class ChromaRadianceParams(chroma_model.ChromaParams):
     patch_size: int