Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-03 02:00:29 +08:00)
Small fixes Kandinsky5
commit 94d1df4b83
parent c55a6fb271
@@ -1791,17 +1791,9 @@ class Kandinsky5Image(Kandinsky5):
     def concat_cond(self, **kwargs):
         return None
 
-class Kandinsky5ImageToImage(BaseModel):
+class Kandinsky5ImageToImage(Kandinsky5):
     def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
-        super().__init__(
-            model_config,
-            model_type,
-            device=device,
-            unet_model=comfy.ldm.kandinsky5.model.Kandinsky5
-        )
-
-    def encode_adm(self, **kwargs):
-        return kwargs["pooled_output"]
+        super().__init__(model_config, model_type, device=device)
 
     def concat_cond(self, **kwargs):
         noise = kwargs["noise"]
@@ -62,15 +62,15 @@ class Kandinsky5ImageToImage(io.ComfyNode):
     def define_schema(cls):
         return io.Schema(
             node_id="Kandinsky5ImageToImage",
-            category="image",
+            category="advanced/conditioning/kandinsky5",
             inputs=[
                 io.Vae.Input("vae"),
                 io.Int.Input("batch_size", default=1, min=1, max=4096),
                 io.Image.Input("start_image"),
             ],
             outputs=[
-                io.Latent.Output(display_name="latent", tooltip="Empty video latent"),
-                io.Image.Output("resized_image"),
+                io.Latent.Output(display_name="latent", tooltip="Latent of resized source image"),
+                io.Image.Output("resized_image", tooltip="Resized source image"),
             ],
         )
 
@@ -78,8 +78,8 @@ class Kandinsky5ImageToImage(io.ComfyNode):
     def execute(cls, vae, batch_size, start_image) -> io.NodeOutput:
         height, width = start_image.shape[1:-1]
         available_res = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280), (1280, 768), (896, 1152), (1152, 896)]
-        nearest_index = torch.argmin(torch.Tensor([abs((w / h) - (width / height))for (w, h) in available_res]))
-        nw, nh = available_res[nearest_index]
+        nearest_index = torch.argmin(torch.Tensor([abs((h / w) - (height / width))for (h, w) in available_res]))
+        nh, nw = available_res[nearest_index]
         scale_factor = min(height / nh, width / nw)
         start_image = start_image.permute(0,3,1,2)
         start_image = F.resize(start_image, (int(height / scale_factor), int(width / scale_factor)))
@@ -150,7 +150,7 @@ class CLIPTextEncodeKandinsky5(io.ComfyNode):
         return io.Schema(
             node_id="CLIPTextEncodeKandinsky5",
             search_aliases=["kandinsky prompt"],
-            category="advanced/conditioning",
+            category="advanced/conditioning/kandinsky5",
             inputs=[
                 io.Clip.Input("clip"),
                 io.String.Input("prompt", multiline=True, dynamic_prompts=True),
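
The resolution-selection change in the @@ -78,8 +78,8 @@ hunk makes the aspect-ratio comparison and the tuple unpacking consistent with reading the entries of available_res as (height, width), the same order produced by start_image.shape[1:-1] on a [B, H, W, C] image tensor. Below is a minimal standalone sketch of the corrected selection logic; the helper name nearest_resolution and the 720x1280 example input are illustrative and not part of the commit.

import torch

# Candidate (height, width) buckets copied from the node.
AVAILABLE_RES = [(1024, 1024), (640, 1408), (1408, 640), (768, 1280),
                 (1280, 768), (896, 1152), (1152, 896)]

def nearest_resolution(height, width):
    # Pick the bucket whose h/w ratio is closest to the input's h/w ratio,
    # reading each tuple as (h, w) exactly as the fixed node code does.
    diffs = torch.tensor([abs((h / w) - (height / width)) for (h, w) in AVAILABLE_RES])
    nh, nw = AVAILABLE_RES[torch.argmin(diffs)]
    return nh, nw

print(nearest_resolution(720, 1280))  # a 720x1280 (h x w) frame maps to (768, 1280)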