mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-13 23:12:35 +08:00)

commit fcd3a00d91: whitespaces...
parent fb4739f2f5
@@ -1596,7 +1596,7 @@ class HunyuanVideo15(HunyuanVideo):
            out['clip_fea'] = comfy.conds.CONDRegular(clip_vision_output.last_hidden_state)

        return out

class HunyuanVideo15_SR_Distilled(HunyuanImage21Refiner):
    def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
        super().__init__(model_config, model_type, device=device)
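The clip_fea line in the hunk above is where the model forwards an attached CLIP vision embedding to the diffusion backbone as an extra cond. A minimal sketch of that pattern, assuming it sits inside the model's extra_conds hook and that the embedding is pulled from the conditioning kwargs (both inferred from how other models in ComfyUI's comfy/model_base.py do it, not shown in this hunk):

import comfy.conds
from comfy.model_base import HunyuanVideo

class HunyuanVideo15(HunyuanVideo):
    # Sketch only: assumed enclosing method for the clip_fea line above.
    def extra_conds(self, **kwargs):
        out = super().extra_conds(**kwargs)
        clip_vision_output = kwargs.get("clip_vision_output", None)
        if clip_vision_output is not None:
            # Hand the CLIP vision embedding to the model as the "clip_fea" cond.
            out['clip_fea'] = comfy.conds.CONDRegular(clip_vision_output.last_hidden_state)

        return out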
@@ -159,13 +159,13 @@ class HunyuanVideo15RefinerLatent(io.ComfyNode):
        encoded = vae.encode(start_image[:, :, :, :3])
        cond_latent[:, :in_channels, :encoded.shape[2], :, :] = encoded
        cond_latent[:, in_channels + 1, 0] = 1

        positive = node_helpers.conditioning_set_values(positive, {"concat_latent_image": cond_latent, "noise_augmentation": noise_augmentation})
        negative = node_helpers.conditioning_set_values(negative, {"concat_latent_image": cond_latent, "noise_augmentation": noise_augmentation})
        if clip_vision_output is not None:
            positive = node_helpers.conditioning_set_values(positive, {"clip_vision_output": clip_vision_output})
            negative = node_helpers.conditioning_set_values(negative, {"clip_vision_output": clip_vision_output})

        return io.NodeOutput(positive, negative, latent)
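For readers following the node logic: the three cond_latent lines copy the VAE-encoded start image into the first in_channels channels of an otherwise-zero conditioning latent and then flag frame 0 in a mask channel. A standalone sketch with made-up tensor shapes (the actual channel count, frame count, and spatial size are not given in this hunk):

import torch

# Illustration only: shapes are invented, and `encoded` stands in for the
# vae.encode(start_image[:, :, :, :3]) result used by the node above.
batch, in_channels, frames, h, w = 1, 16, 33, 60, 104
encoded = torch.zeros([batch, in_channels, 1, h, w])

# Zero latent with extra channels after the image channels; index
# in_channels + 1 is treated as a per-frame mask channel here.
cond_latent = torch.zeros([batch, in_channels + 2, frames, h, w])
cond_latent[:, :in_channels, :encoded.shape[2], :, :] = encoded  # reference latent goes first
cond_latent[:, in_channels + 1, 0] = 1                           # mark frame 0 as provided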
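The repeated conditioning_set_values calls work because ComfyUI conditioning is a list of (embedding, options-dict) pairs; the helper returns a copy with the given keys merged into each options dict, which is how concat_latent_image, noise_augmentation and clip_vision_output end up attached to both the positive and negative conditioning. A simplified sketch of that behaviour (not the exact node_helpers source):

# Simplified stand-in for node_helpers.conditioning_set_values; the real helper
# also supports appending, but the copy-and-merge behaviour is what the node
# above relies on.
def conditioning_set_values(conditioning, values={}):
    out = []
    for cond, opts in conditioning:
        opts = opts.copy()      # never mutate the caller's conditioning in place
        opts.update(values)     # e.g. "concat_latent_image", "noise_augmentation"
        out.append([cond, opts])
    return out

Calling it twice, first with the latent values and then with the clip_vision_output, simply layers both sets of keys onto the same conditioning entries.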