convert nodes_qwen.py to V3 schema (#10049)
This commit is contained in:
parent 7eca95657c
commit 160698eb41
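In short: the V1 node interface (INPUT_TYPES, RETURN_TYPES, FUNCTION, CATEGORY and an instance-level encode method) is replaced by the V3 API, where a define_schema classmethod returns an io.Schema and a classmethod execute returns io.NodeOutput, and the module-level NODE_CLASS_MAPPINGS dict gives way to a ComfyExtension subclass exposed through comfy_entrypoint. A distilled skeleton of the pattern follows the diff below.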
@@ -1,24 +1,29 @@
 import node_helpers
 import comfy.utils
 import math
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 
 
-class TextEncodeQwenImageEdit:
+class TextEncodeQwenImageEdit(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-            "clip": ("CLIP", ),
-            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
-            },
-            "optional": {"vae": ("VAE", ),
-                         "image": ("IMAGE", ),}}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TextEncodeQwenImageEdit",
+            category="advanced/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
+                io.Vae.Input("vae", optional=True),
+                io.Image.Input("image", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
-
-    CATEGORY = "advanced/conditioning"
-
-    def encode(self, clip, prompt, vae=None, image=None):
+    @classmethod
+    def execute(cls, clip, prompt, vae=None, image=None) -> io.NodeOutput:
         ref_latent = None
         if image is None:
             images = []
@@ -40,28 +45,30 @@ class TextEncodeQwenImageEdit:
         conditioning = clip.encode_from_tokens_scheduled(tokens)
         if ref_latent is not None:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": [ref_latent]}, append=True)
-        return (conditioning, )
+        return io.NodeOutput(conditioning)
 
 
-class TextEncodeQwenImageEditPlus:
+class TextEncodeQwenImageEditPlus(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-            "clip": ("CLIP", ),
-            "prompt": ("STRING", {"multiline": True, "dynamicPrompts": True}),
-            },
-            "optional": {"vae": ("VAE", ),
-                         "image1": ("IMAGE", ),
-                         "image2": ("IMAGE", ),
-                         "image3": ("IMAGE", ),
-                         }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TextEncodeQwenImageEditPlus",
+            category="advanced/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
+                io.Vae.Input("vae", optional=True),
+                io.Image.Input("image1", optional=True),
+                io.Image.Input("image2", optional=True),
+                io.Image.Input("image3", optional=True),
+            ],
+            outputs=[
+                io.Conditioning.Output(),
+            ],
+        )
 
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
-
-    CATEGORY = "advanced/conditioning"
-
-    def encode(self, clip, prompt, vae=None, image1=None, image2=None, image3=None):
+    @classmethod
+    def execute(cls, clip, prompt, vae=None, image1=None, image2=None, image3=None) -> io.NodeOutput:
         ref_latents = []
         images = [image1, image2, image3]
         images_vl = []
@@ -94,10 +101,17 @@ class TextEncodeQwenImageEditPlus:
         conditioning = clip.encode_from_tokens_scheduled(tokens)
         if len(ref_latents) > 0:
             conditioning = node_helpers.conditioning_set_values(conditioning, {"reference_latents": ref_latents}, append=True)
-        return (conditioning, )
+        return io.NodeOutput(conditioning)
 
 
-NODE_CLASS_MAPPINGS = {
-    "TextEncodeQwenImageEdit": TextEncodeQwenImageEdit,
-    "TextEncodeQwenImageEditPlus": TextEncodeQwenImageEditPlus,
-}
+class QwenExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TextEncodeQwenImageEdit,
+            TextEncodeQwenImageEditPlus,
+        ]
+
+
+async def comfy_entrypoint() -> QwenExtension:
+    return QwenExtension()
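Distilled from the diff above, here is a minimal sketch of the V3 pattern as applied in this commit. The node and extension names (TextEncodeExample, ExampleExtension) and the node_id are placeholders for illustration; clip.tokenize is assumed to behave as in the pre-existing V1 code, which these hunks do not show. Only the io.Schema / io.ComfyNode / ComfyExtension usage visible in the diff is relied on.

# Minimal V3-style node plus extension skeleton (sketch, not the committed code).
from typing_extensions import override

from comfy_api.latest import ComfyExtension, io


class TextEncodeExample(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        # One io.Schema replaces the V1 INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY attributes.
        return io.Schema(
            node_id="TextEncodeExample",  # placeholder id
            category="advanced/conditioning",
            inputs=[
                io.Clip.Input("clip"),
                io.String.Input("prompt", multiline=True, dynamic_prompts=True),
            ],
            outputs=[
                io.Conditioning.Output(),
            ],
        )

    @classmethod
    def execute(cls, clip, prompt) -> io.NodeOutput:
        # V3 nodes implement a classmethod execute() and wrap results in io.NodeOutput
        # instead of returning a tuple from an instance method.
        tokens = clip.tokenize(prompt)  # assumed: the usual ComfyUI CLIP tokenize call
        conditioning = clip.encode_from_tokens_scheduled(tokens)
        return io.NodeOutput(conditioning)


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # Replaces the old module-level NODE_CLASS_MAPPINGS dict.
        return [
            TextEncodeExample,
        ]


async def comfy_entrypoint() -> ExampleExtension:
    # ComfyUI discovers the extension through this module-level entrypoint.
    return ExampleExtension()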