Compare commits

...

4 Commits

Author          SHA1        Message                                      Date
envy-ai         43dd2b987f  Merge 27d11db345 into 7ee77ff038             2026-01-26 09:59:11 -05:00
comfyanonymous  7ee77ff038  Add name to LoraLoaderModelOnly. (#12078)    2026-01-25 21:01:55 -05:00
envy-ai         27d11db345  missed adding these in previous commit       2025-05-13 13:11:53 -04:00
envy-ai         90f23bac28  Use cached encoded empty strings rather than all zeros for better quality and prompt adherence with llama-only HiDreams  2025-04-21 01:11:22 -04:00
5 changed files with 13 additions and 5 deletions
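
The approach in 90f23bac28 replaces the all-zero placeholder tensors used when an encoder produces no output with pre-computed embeddings of the empty string, loaded from .pt files. Below is a minimal sketch of how such blank-embedding files might be generated ahead of time; the encoder objects, their call signatures, and the output shapes are assumptions for illustration, not part of this PR.

import os
import torch

def cache_blank_embeddings(t5_encode, pooled_encode, out_dir="models/hidream_empty_latents"):
    # Encode an empty prompt once and store the results so they can stand in
    # for missing encoder outputs at inference time (instead of zero tensors).
    os.makedirs(out_dir, exist_ok=True)
    with torch.no_grad():
        t5_blank = t5_encode("")          # assumed shape roughly (1, seq_len, 4096)
        pooled_blank = pooled_encode("")  # assumed shape roughly (1, 768 + 1280)
    torch.save(t5_blank.cpu(), os.path.join(out_dir, "t5_blank.pt"))
    torch.save(pooled_blank.cpu(), os.path.join(out_dir, "pooled_blank.pt"))

The two file names match the ones loaded in the diff below; how the reference tensors were actually produced is not shown in this comparison.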

View File

@@ -5,6 +5,7 @@ from comfy import sdxl_clip
 import comfy.model_management
 import torch
 import logging
+import folder_paths


 class HiDreamTokenizer:
@@ -91,6 +92,8 @@ class HiDreamTEModel(torch.nn.Module):
         token_weight_pairs_llama = token_weight_pairs["llama"]
         lg_out = None
         pooled = None
+        t5_out = None
+        ll_out = None
         extra = {}

         if len(token_weight_pairs_g) > 0 or len(token_weight_pairs_l) > 0:
@@ -104,8 +107,9 @@
             else:
                 g_pooled = torch.zeros((1, 1280), device=comfy.model_management.intermediate_device())

-            pooled = torch.cat((l_pooled, g_pooled), dim=-1)
+            if self.clip_g is not None and self.clip_l is not None:
+                pooled = torch.cat((l_pooled, g_pooled), dim=-1)

         if self.t5xxl is not None:
             t5_output = self.t5xxl.encode_token_weights(token_weight_pairs_t5)
             t5_out, t5_pooled = t5_output[:2]
@@ -120,13 +124,15 @@
             ll_out = None

         if t5_out is None:
-            t5_out = torch.zeros((1, 128, 4096), device=comfy.model_management.intermediate_device())
+            t5_path = folder_paths.get_full_path_or_raise("hidream_empty_latents", "t5_blank.pt")
+            t5_out = torch.load(t5_path, map_location=comfy.model_management.intermediate_device())

         if ll_out is None:
             ll_out = torch.zeros((1, 32, 1, 4096), device=comfy.model_management.intermediate_device())

         if pooled is None:
-            pooled = torch.zeros((1, 768 + 1280), device=comfy.model_management.intermediate_device())
+            pooled_path = folder_paths.get_full_path_or_raise("hidream_empty_latents", "pooled_blank.pt")
+            pooled = torch.load(pooled_path, map_location=comfy.model_management.intermediate_device())

         extra["conditioning_llama3"] = ll_out
         return t5_out, pooled, extra
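
Net effect of the changed fallbacks above: when the T5 or pooled CLIP outputs are missing, the encoder now loads cached empty-prompt embeddings from the hidream_empty_latents model folder instead of substituting zeros, while the llama fallback keeps its zero tensor. A condensed sketch of that logic follows; the _load_blank helper is a placeholder introduced here for readability, and only the folder_paths and torch calls mirror the diff.

import torch
import folder_paths
import comfy.model_management

def _load_blank(filename):
    # Resolve the cached empty-prompt tensor from the registered model folder
    # and load it directly onto the intermediate device, as the diff does.
    path = folder_paths.get_full_path_or_raise("hidream_empty_latents", filename)
    return torch.load(path, map_location=comfy.model_management.intermediate_device())

def apply_fallbacks(t5_out, ll_out, pooled):
    if t5_out is None:
        t5_out = _load_blank("t5_blank.pt")
    if ll_out is None:
        ll_out = torch.zeros((1, 32, 1, 4096), device=comfy.model_management.intermediate_device())
    if pooled is None:
        pooled = _load_blank("pooled_blank.pt")
    return t5_out, ll_out, pooled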

View File

@@ -47,6 +47,7 @@ folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetwo
 folder_names_and_paths["photomaker"] = ([os.path.join(models_dir, "photomaker")], supported_pt_extensions)
 folder_names_and_paths["classifiers"] = ([os.path.join(models_dir, "classifiers")], {""})
+folder_names_and_paths["hidream_empty_latents"] = ([os.path.join(models_dir, "hidream_empty_latents")], supported_pt_extensions)
 folder_names_and_paths["model_patches"] = ([os.path.join(models_dir, "model_patches")], supported_pt_extensions)

Binary file not shown.

Binary file not shown.

View File

@@ -2105,7 +2105,8 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "CheckpointLoader": "Load Checkpoint With Config (DEPRECATED)",
     "CheckpointLoaderSimple": "Load Checkpoint",
     "VAELoader": "Load VAE",
-    "LoraLoader": "Load LoRA",
+    "LoraLoader": "Load LoRA (Model and CLIP)",
+    "LoraLoaderModelOnly": "Load LoRA",
     "CLIPLoader": "Load CLIP",
     "ControlNetLoader": "Load ControlNet Model",
     "DiffControlNetLoader": "Load ControlNet Model (diff)",