Fix resize in I2I Node

Mihail Karaev 2026-01-13 12:53:27 +00:00
parent b2593317bf
commit 646cf6600a


@@ -64,29 +64,27 @@ class Kandinsky5ImageToImage(io.ComfyNode):
            node_id="Kandinsky5ImageToImage",
            category="image",
            inputs=[
                io.Conditioning.Input("positive"),
                io.Conditioning.Input("negative"),
                io.Vae.Input("vae"),
                io.Int.Input("batch_size", default=1, min=1, max=4096),
                io.Image.Input("start_image"),
            ],
            outputs=[
                io.Conditioning.Output(display_name="positive"),
                io.Conditioning.Output(display_name="negative"),
                io.Latent.Output(display_name="latent", tooltip="Empty video latent"),
                io.Image.Output("resized_image"),
            ],
        )

    @classmethod
    def execute(cls, positive, negative, vae, batch_size, start_image) -> io.NodeOutput:
    def execute(cls, vae, batch_size, start_image) -> io.NodeOutput:
        height, width = start_image.shape[1:-1]
        available_res = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280), (1280, 768), (896, 1152), (1152, 896)]
        nearest_index = torch.argmin(torch.Tensor([abs((w / h) - (width / height)) for (w, h) in available_res]))
        nw, nh = available_res[nearest_index]
        scale_factor = min(height / nh, width / nw)
        start_image = start_image.permute(0,3,1,2)
        start_image = F.resize(start_image, (int(height / scale_factor), int(width / scale_factor)))
        height, width = start_image.shape[-2:]
        start_image = F.crop(
            start_image,
            (height - nh) // 2,
@@ -94,11 +92,10 @@ class Kandinsky5ImageToImage(io.ComfyNode):
            nh,
            nw,
        )
        print(start_image.shape)
        start_image = start_image.permute(0,2,3,1)
        encoded = vae.encode(start_image[:, :, :, :3])
        out_latent = {"samples": encoded.repeat(batch_size, 1, 1, 1)}
        return io.NodeOutput(positive, negative, out_latent)
        return io.NodeOutput(out_latent, start_image)
def adaptive_mean_std_normalization(source, reference, clump_mean_low=0.3, clump_mean_high=0.35, clump_std_low=0.35, clump_std_high=0.5):
@@ -147,35 +144,11 @@ class NormalizeVideoLatentStart(io.ComfyNode):
        s["samples"] = samples
        return io.NodeOutput(s)

class CLIPTextEncodeKandinsky5(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="CLIPTextEncodeKandinsky5",
            category="advanced/conditioning/kandinsky5",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("clip_l", multiline=True, dynamic_prompts=True),
                io.String.Input("qwen25_7b", multiline=True, dynamic_prompts=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, clip_l, qwen25_7b) -> io.NodeOutput:
        tokens = clip.tokenize(clip_l)
        tokens["qwen25_7b"] = clip.tokenize(qwen25_7b)["qwen25_7b"]
        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens))

class TextEncodeQwenKandinskyI2I(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="TextEncodeQwenKandinskyI2I",
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
@@ -200,7 +173,6 @@ class Kandinsky5Extension(ComfyExtension):
            Kandinsky5ImageToImage,
            NormalizeVideoLatentStart,
            CLIPTextEncodeKandinsky5,
            TextEncodeQwenKandinskyI2I,
        ]

async def comfy_entrypoint() -> Kandinsky5Extension:
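
For reference, below is a minimal standalone sketch of the resize-then-center-crop step that the updated execute method performs. It assumes the node's F is torchvision.transforms.functional (the resize/crop call signatures match) and that images use ComfyUI's [B, H, W, C] layout; resize_to_nearest_res is a hypothetical helper name used only for illustration, not part of the node.

import torch
import torchvision.transforms.functional as F

# Resolution buckets hard-coded in the node, as (width, height) pairs.
AVAILABLE_RES = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280),
                 (1280, 768), (896, 1152), (1152, 896)]

def resize_to_nearest_res(image: torch.Tensor) -> torch.Tensor:
    # image: [B, H, W, C] float tensor, as ComfyUI passes IMAGE tensors.
    height, width = image.shape[1:3]
    # Pick the target (w, h) whose aspect ratio is closest to the input's.
    diffs = torch.tensor([abs((w / h) - (width / height)) for (w, h) in AVAILABLE_RES])
    nw, nh = AVAILABLE_RES[int(torch.argmin(diffs))]
    # Rescale so the image still covers the target, then center-crop to exactly (nh, nw).
    scale = min(height / nh, width / nw)
    image = image.permute(0, 3, 1, 2)  # [B, C, H, W] for torchvision
    image = F.resize(image, (int(height / scale), int(width / scale)))
    h, w = image.shape[-2:]
    image = F.crop(image, (h - nh) // 2, (w - nw) // 2, nh, nw)
    return image.permute(0, 2, 3, 1)  # back to [B, H, W, C]

For example, a 1080x1920 (HxW) input has an aspect ratio of about 1.78, so the closest bucket is (1280, 768); the image is rescaled to 768x1365 and then center-cropped to 768x1280 before VAE encoding.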