This commit is contained in:
Octopus 2026-04-11 13:13:58 +08:00 committed by GitHub
commit 70f4c3d63e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 9 additions and 3 deletions

View File

@@ -858,7 +858,7 @@ class LTXBaseModel(torch.nn.Module, ABC):
return attention_mask
def forward(
self, x, timestep, context, attention_mask, frame_rate=25, transformer_options={}, keyframe_idxs=None, denoise_mask=None, **kwargs
self, x, timestep, context, attention_mask=None, frame_rate=25, transformer_options={}, keyframe_idxs=None, denoise_mask=None, **kwargs
):
"""
Forward pass for LTX models.
@@ -867,7 +867,7 @@ class LTXBaseModel(torch.nn.Module, ABC):
x: Input tensor
timestep: Timestep tensor
context: Context tensor (e.g., text embeddings)
attention_mask: Attention mask tensor
attention_mask: Attention mask tensor (optional)
frame_rate: Frame rate for temporal processing
transformer_options: Additional options for transformer blocks
keyframe_idxs: Keyframe indices for temporal processing
@@ -885,7 +885,7 @@ class LTXBaseModel(torch.nn.Module, ABC):
).execute(x, timestep, context, attention_mask, frame_rate, transformer_options, keyframe_idxs, denoise_mask=denoise_mask, **kwargs)
def _forward(
self, x, timestep, context, attention_mask, frame_rate=25, transformer_options={}, keyframe_idxs=None, denoise_mask=None, **kwargs
self, x, timestep, context, attention_mask=None, frame_rate=25, transformer_options={}, keyframe_idxs=None, denoise_mask=None, **kwargs
):
"""
Internal forward pass for LTX models.

View File

@@ -424,6 +424,12 @@ class CLIP:
return self.patcher.get_key_patches()
def generate(self, tokens, do_sample=True, max_length=256, temperature=1.0, top_k=50, top_p=0.95, min_p=0.0, repetition_penalty=1.0, seed=None, presence_penalty=0.0):
if not hasattr(self.cond_stage_model, 'generate'):
raise RuntimeError(
f"The loaded model ({type(self.cond_stage_model).__name__}) does not support text generation. "
"The TextGenerate node requires a language model (LLM) such as Qwen, LLaMA, or Gemma, "
"not a CLIP text encoder. Please load the correct model type."
)
self.cond_stage_model.reset_clip_options()
self.load_model(tokens)