Compare commits

...

4 Commits

Author            SHA1        Message  Date
drozbay           bb52415057  Merge 9ee905bc47 into acbf08cd60  2026-01-04 13:06:25 +01:00
Alexander Piskun  acbf08cd60  feat(api-nodes): add support for 720p resolution for Kling Omni nodes (#11604)  2026-01-03 23:05:02 -08:00
drozbay           9ee905bc47  Merge branch 'master' into humo_i2v  2025-12-31 07:37:07 -07:00
ozbayb            d56d374c96  Allow Wan21_HuMo extra_conds to pass concat_latent_image through if present  2025-12-30 18:29:00 -07:00
2 changed files with 30 additions and 14 deletions

View File

@@ -1248,22 +1248,23 @@ class WAN21_HuMo(WAN21):
         if audio_embed is not None:
             out['audio_embed'] = comfy.conds.CONDRegular(audio_embed)
-        if "c_concat" not in out: # 1.7B model
-            reference_latents = kwargs.get("reference_latents", None)
-            if reference_latents is not None:
+        reference_latents = kwargs.get("reference_latents", None)
+        if "c_concat" not in out and reference_latents is not None and reference_latents[0].shape[1] == 16: # 1.7B model
             out['reference_latent'] = comfy.conds.CONDRegular(self.process_latent_in(reference_latents[-1]))
-        else:
-            noise_shape = list(noise.shape)
-            noise_shape[1] += 4
-            concat_latent = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype)
-            zero_vae_values_first = torch.tensor([0.8660, -0.4326, -0.0017, -0.4884, -0.5283, 0.9207, -0.9896, 0.4433, -0.5543, -0.0113, 0.5753, -0.6000, -0.8346, -0.3497, -0.1926, -0.6938]).view(1, 16, 1, 1, 1)
-            zero_vae_values_second = torch.tensor([1.0869, -1.2370, 0.0206, -0.4357, -0.6411, 2.0307, -1.5972, 1.2659, -0.8595, -0.4654, 0.9638, -1.6330, -1.4310, -0.1098, -0.3856, -1.4583]).view(1, 16, 1, 1, 1)
-            zero_vae_values = torch.tensor([0.8642, -1.8583, 0.1577, 0.1350, -0.3641, 2.5863, -1.9670, 1.6065, -1.0475, -0.8678, 1.1734, -1.8138, -1.5933, -0.7721, -0.3289, -1.3745]).view(1, 16, 1, 1, 1)
-            concat_latent[:, 4:] = zero_vae_values
-            concat_latent[:, 4:, :1] = zero_vae_values_first
-            concat_latent[:, 4:, 1:2] = zero_vae_values_second
-            out['c_concat'] = comfy.conds.CONDNoiseShape(concat_latent)
-            reference_latents = kwargs.get("reference_latents", None)
+        else:
+            concat_latent_image = kwargs.get("concat_latent_image", None)
+            if concat_latent_image is None:
+                noise_shape = list(noise.shape)
+                noise_shape[1] += 4
+                concat_latent = torch.zeros(noise_shape, device=noise.device, dtype=noise.dtype)
+                zero_vae_values_first = torch.tensor([0.8660, -0.4326, -0.0017, -0.4884, -0.5283, 0.9207, -0.9896, 0.4433, -0.5543, -0.0113, 0.5753, -0.6000, -0.8346, -0.3497, -0.1926, -0.6938]).view(1, 16, 1, 1, 1)
+                zero_vae_values_second = torch.tensor([1.0869, -1.2370, 0.0206, -0.4357, -0.6411, 2.0307, -1.5972, 1.2659, -0.8595, -0.4654, 0.9638, -1.6330, -1.4310, -0.1098, -0.3856, -1.4583]).view(1, 16, 1, 1, 1)
+                zero_vae_values = torch.tensor([0.8642, -1.8583, 0.1577, 0.1350, -0.3641, 2.5863, -1.9670, 1.6065, -1.0475, -0.8678, 1.1734, -1.8138, -1.5933, -0.7721, -0.3289, -1.3745]).view(1, 16, 1, 1, 1)
+                concat_latent[:, 4:] = zero_vae_values
+                concat_latent[:, 4:, :1] = zero_vae_values_first
+                concat_latent[:, 4:, 1:2] = zero_vae_values_second
+                out['c_concat'] = comfy.conds.CONDNoiseShape(concat_latent)
             if reference_latents is not None:
                 ref_latent = self.process_latent_in(reference_latents[-1])
                 ref_latent_shape = list(ref_latent.shape)
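
Net effect of the hunk above: a caller-supplied concat_latent_image now passes through (the inherited extra_conds presumably builds c_concat from it), and the synthetic "zero VAE" padding latent is only built when no image latent was provided. A minimal runnable sketch of the resulting control flow, with the comfy.conds wrappers, process_latent_in(), and the per-frame constants reduced to stand-ins; names here are illustrative, not ComfyUI API:

import torch

def extra_conds_flow(out: dict, noise: torch.Tensor, **kwargs) -> dict:
    # Stand-in for the WAN21_HuMo.extra_conds branching after this change.
    reference_latents = kwargs.get("reference_latents", None)
    if "c_concat" not in out and reference_latents is not None and reference_latents[0].shape[1] == 16:
        # 1.7B model path: expose the last reference latent directly.
        out["reference_latent"] = reference_latents[-1]  # process_latent_in() elided
    else:
        concat_latent_image = kwargs.get("concat_latent_image", None)
        if concat_latent_image is None:
            # No caller-supplied latent: synthesize the padded zero-VAE latent
            # (4 mask channels in front of the 16 latent channels).
            shape = list(noise.shape)
            shape[1] += 4
            out["c_concat"] = torch.zeros(shape, device=noise.device, dtype=noise.dtype)
        else:
            # Pass-through made explicit for the sketch; in the real class the
            # inherited extra_conds is what sets c_concat from this kwarg.
            out["c_concat"] = concat_latent_image
        if reference_latents is not None:
            out["ref_latent"] = reference_latents[-1]
    return out

# A supplied latent is used as-is instead of the synthesized padding latent.
noise = torch.zeros(1, 16, 4, 8, 8)
supplied = torch.randn(1, 20, 4, 8, 8)
assert extra_conds_flow({}, noise, concat_latent_image=supplied)["c_concat"] is supplied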

View File

@@ -807,6 +807,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
                 ),
                 IO.Combo.Input("aspect_ratio", options=["16:9", "9:16", "1:1"]),
                 IO.Combo.Input("duration", options=[5, 10]),
+                IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -826,6 +827,7 @@ class OmniProTextToVideoNode(IO.ComfyNode):
         prompt: str,
         aspect_ratio: str,
         duration: int,
+        resolution: str = "1080p",
     ) -> IO.NodeOutput:
         validate_string(prompt, min_length=1, max_length=2500)
         response = await sync_op(
@@ -837,6 +839,7 @@
                 prompt=prompt,
                 aspect_ratio=aspect_ratio,
                 duration=str(duration),
+                mode="pro" if resolution == "1080p" else "std",
             ),
         )
         return await finish_omni_video_task(cls, response)
@@ -872,6 +875,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
                     optional=True,
                     tooltip="Up to 6 additional reference images.",
                 ),
+                IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -893,6 +897,7 @@ class OmniProFirstLastFrameNode(IO.ComfyNode):
         first_frame: Input.Image,
         end_frame: Input.Image | None = None,
         reference_images: Input.Image | None = None,
+        resolution: str = "1080p",
     ) -> IO.NodeOutput:
         prompt = normalize_omni_prompt_references(prompt)
         validate_string(prompt, min_length=1, max_length=2500)
@@ -936,6 +941,7 @@
                 prompt=prompt,
                 duration=str(duration),
                 image_list=image_list,
+                mode="pro" if resolution == "1080p" else "std",
             ),
         )
         return await finish_omni_video_task(cls, response)
@@ -964,6 +970,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
                     "reference_images",
                     tooltip="Up to 7 reference images.",
                 ),
+                IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -984,6 +991,7 @@ class OmniProImageToVideoNode(IO.ComfyNode):
         aspect_ratio: str,
         duration: int,
         reference_images: Input.Image,
+        resolution: str = "1080p",
     ) -> IO.NodeOutput:
         prompt = normalize_omni_prompt_references(prompt)
         validate_string(prompt, min_length=1, max_length=2500)
@@ -1005,6 +1013,7 @@
                 aspect_ratio=aspect_ratio,
                 duration=str(duration),
                 image_list=image_list,
+                mode="pro" if resolution == "1080p" else "std",
             ),
         )
         return await finish_omni_video_task(cls, response)
@@ -1036,6 +1045,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
                     tooltip="Up to 4 additional reference images.",
                     optional=True,
                 ),
+                IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -1058,6 +1068,7 @@ class OmniProVideoToVideoNode(IO.ComfyNode):
         reference_video: Input.Video,
         keep_original_sound: bool,
         reference_images: Input.Image | None = None,
+        resolution: str = "1080p",
     ) -> IO.NodeOutput:
         prompt = normalize_omni_prompt_references(prompt)
         validate_string(prompt, min_length=1, max_length=2500)
@@ -1090,6 +1101,7 @@
                 duration=str(duration),
                 image_list=image_list if image_list else None,
                 video_list=video_list,
+                mode="pro" if resolution == "1080p" else "std",
             ),
         )
         return await finish_omni_video_task(cls, response)
@@ -1119,6 +1131,7 @@ class OmniProEditVideoNode(IO.ComfyNode):
                     tooltip="Up to 4 additional reference images.",
                     optional=True,
                 ),
+                IO.Combo.Input("resolution", options=["1080p", "720p"], optional=True),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -1139,6 +1152,7 @@ class OmniProEditVideoNode(IO.ComfyNode):
         video: Input.Video,
         keep_original_sound: bool,
         reference_images: Input.Image | None = None,
+        resolution: str = "1080p",
     ) -> IO.NodeOutput:
         prompt = normalize_omni_prompt_references(prompt)
         validate_string(prompt, min_length=1, max_length=2500)
@@ -1171,6 +1185,7 @@
                 duration=None,
                 image_list=image_list if image_list else None,
                 video_list=video_list,
+                mode="pro" if resolution == "1080p" else "std",
             ),
         )
         return await finish_omni_video_task(cls, response)
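
All fifteen hunks in this file apply the same pattern to the five Omni nodes: an optional resolution combo input, a matching keyword default in execute(), and a mode field derived from it in the request. A minimal sketch of that mapping and its backward-compatibility property; the helper name is hypothetical, not part of the diff:

def kling_omni_mode(resolution: str = "1080p") -> str:
    # Mirrors the expression each hunk adds to the request payload:
    #   mode="pro" if resolution == "1080p" else "std"
    return "pro" if resolution == "1080p" else "std"

# Existing workflows never send "resolution", so they keep the previous
# 1080p/"pro" behavior; choosing 720p opts into the "std" tier.
assert kling_omni_mode() == "pro"
assert kling_omni_mode("1080p") == "pro"
assert kling_omni_mode("720p") == "std"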