mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-06 19:42:34 +08:00
Fix alpha channel handling in Qwen image edit nodes
- Fix TextEncodeQwenImageEditPlus so that only the RGB channels of each input image are used.
- Prevents a RuntimeError when input images carry an alpha channel.
- Ensures the tensor shape expected by vision-language models.
This commit is contained in:
parent
2debbcf50e
commit
ffad732482
@ -1,4 +1,4 @@
|
|||||||
import node_helpers
|
import node_helpers
|
||||||
import comfy.utils
|
import comfy.utils
|
||||||
import math
|
import math
|
||||||
|
|
||||||
@ -13,15 +13,13 @@ class TextEncodeQwenImageEdit:
|
|||||||
"optional": {"vae": ("VAE", ),
|
"optional": {"vae": ("VAE", ),
|
||||||
"image": ("IMAGE", ),}}
|
"image": ("IMAGE", ),}}
|
||||||
|
|
||||||
RETURN_TYPES = ("CONDITIONING", "IMAGE", "LATENT")
|
RETURN_TYPES = ("CONDITIONING",)
|
||||||
FUNCTION = "encode"
|
FUNCTION = "encode"
|
||||||
|
|
||||||
CATEGORY = "advanced/conditioning"
|
CATEGORY = "advanced/conditioning"
|
||||||
|
|
||||||
def encode(self, clip, prompt, vae=None, image=None):
|
def encode(self, clip, prompt, vae=None, image=None):
|
||||||
ref_latent = None
|
ref_latent = None
|
||||||
output_image = None
|
|
||||||
|
|
||||||
if image is None:
|
if image is None:
|
||||||
images = []
|
images = []
|
||||||
else:
|
else:
|
||||||
@ -29,23 +27,12 @@ class TextEncodeQwenImageEdit:
|
|||||||
total = int(1024 * 1024)
|
total = int(1024 * 1024)
|
||||||
|
|
||||||
scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
|
scale_by = math.sqrt(total / (samples.shape[3] * samples.shape[2]))
|
||||||
|
width = round(samples.shape[3] * scale_by)
|
||||||
|
height = round(samples.shape[2] * scale_by)
|
||||||
|
|
||||||
width = math.floor(samples.shape[3] * scale_by / 8) * 8
|
s = comfy.utils.common_upscale(samples, width, height, "area", "disabled")
|
||||||
height = math.floor(samples.shape[2] * scale_by / 8) * 8
|
|
||||||
|
|
||||||
original_width = samples.shape[3]
|
|
||||||
original_height = samples.shape[2]
|
|
||||||
|
|
||||||
if width < original_width or height < original_height:
|
|
||||||
upscale_method = "area"
|
|
||||||
else:
|
|
||||||
upscale_method = "lanczos"
|
|
||||||
|
|
||||||
s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
|
|
||||||
image = s.movedim(1, -1)
|
image = s.movedim(1, -1)
|
||||||
images = [image[:, :, :, :3]]
|
images = [image[:, :, :, :3]]
|
||||||
output_image = image[:, :, :, :3]
|
|
||||||
|
|
||||||
if vae is not None:
|
if vae is not None:
|
||||||
ref_latent = vae.encode(image[:, :, :, :3])
|
ref_latent = vae.encode(image[:, :, :, :3])
|
||||||
|
|
||||||
@ -53,12 +40,64 @@ class TextEncodeQwenImageEdit:
|
|||||||
conditioning = clip.encode_from_tokens_scheduled(tokens)
|
conditioning = clip.encode_from_tokens_scheduled(tokens)
|
||||||
if ref_latent is not None:
|
if ref_latent is not None:
|
||||||
conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
|
conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
|
||||||
|
return (conditioning, )
|
||||||
latent_output = {"samples": ref_latent} if ref_latent is not None else None
|
|
||||||
|
|
||||||
return (conditioning, output_image, latent_output)
|
class TextEncodeQwenImageEditPlus:
    """Encode a text prompt plus up to three reference images for Qwen image editing.

    Each provided image is downscaled for the vision-language encoder and,
    when a VAE is supplied, separately rescaled and encoded into a reference
    latent that is attached to the resulting conditioning.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP", ),
            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
            },
            "optional": {"vae": ("VAE", ),
                         "image1": ("IMAGE", ),
                         "image2": ("IMAGE", ),
                         "image3": ("IMAGE", ),
                         }}

    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "encode"

    CATEGORY = "advanced/conditioning"

    def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None):
        """Build conditioning from the prompt and any of the three optional images.

        Args:
            clip: CLIP model wrapper used for tokenization and encoding.
            prompt: user instruction text.
            vae: optional VAE; when present, reference latents are produced.
            image1/image2/image3: optional IMAGE tensors (NHWC, possibly RGBA).

        Returns:
            A one-tuple containing the conditioning.
        """
        llama_template = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n"
        ref_latents = []
        images_vl = []
        image_prompt = ""

        for idx, image in enumerate((image1, image2, image3)):
            if image is None:
                continue
            samples = image.movedim(-1, 1)  # NHWC -> NCHW for the upscaler

            # Resize so the pixel count is roughly 384*384 for the VL encoder.
            vl_scale = math.sqrt(int(384 * 384) / (samples.shape[3] * samples.shape[2]))
            vl_width = round(samples.shape[3] * vl_scale)
            vl_height = round(samples.shape[2] * vl_scale)
            vl_samples = comfy.utils.common_upscale(samples, vl_width, vl_height, "area", "disabled")
            # Keep only RGB; an alpha channel would break the VL model's expected shape.
            images_vl.append(vl_samples.movedim(1, -1)[:, :, :, :3])

            if vae is not None:
                # Resize to roughly one megapixel, snapping each side to a
                # multiple of 8 as required by the VAE.
                lat_scale = math.sqrt(int(1024 * 1024) / (samples.shape[3] * samples.shape[2]))
                lat_width = round(samples.shape[3] * lat_scale / 8.0) * 8
                lat_height = round(samples.shape[2] * lat_scale / 8.0) * 8
                lat_samples = comfy.utils.common_upscale(samples, lat_width, lat_height, "area", "disabled")
                # Encode RGB only; dropping alpha avoids a channel-count mismatch.
                ref_latents.append(vae.encode(lat_samples.movedim(1, -1)[:, :, :, :3]))

            image_prompt += "Picture {}: <|vision_start|><|image_pad|><|vision_end|>".format(idx + 1)

        tokens = clip.tokenize(image_prompt + prompt, images=images_vl, llama_template=llama_template)
        conditioning = clip.encode_from_tokens_scheduled(tokens)
        if ref_latents:
            conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
        return (conditioning, )
||||||
|
|
||||||
|
|
||||||
# Registry consumed by ComfyUI: maps each node's string identifier (as stored
# in workflow JSON) to the class implementing it.
NODE_CLASS_MAPPINGS = {
    "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit,
    "TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus,
}
|
|||||||
Loading…
Reference in New Issue
Block a user