feat: Add search_aliases field to node schema (#12010)
* feat: Add search_aliases field to node schema
Adds a `search_aliases` field to improve node discoverability. Users can define alternative search terms for nodes (e.g., "text concat" → StringConcatenate).
Changes:
- Add `search_aliases: list[str]` to V3 Schema
- Add `SEARCH_ALIASES` support for V1 nodes
- Include field in `/object_info` response
- Add aliases to high-priority core nodes
V1 usage:
```python
class MyNode:
    SEARCH_ALIASES = ["alt name", "synonym"]
```
V3 usage:
```python
io.Schema(
    node_id="MyNode",
    search_aliases=["alt name", "synonym"],
    ...
)
```
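
For illustration, a `/object_info` entry for a node that defines aliases would then carry the new field alongside the existing metadata. The snippet below is a trimmed, hypothetical example; real entries contain many more keys (inputs, outputs, description, etc., as produced from `NodeInfoV1`):
```python
# Hypothetical, trimmed /object_info entry for illustration only.
example_entry = {
    "StringConcatenate": {
        "display_name": "Concatenate",
        "category": "utils/string",
        "search_aliases": ["text concat", "join text", "merge text"],
        # ...other node metadata omitted...
    }
}
```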
## Related PRs
- Frontend: Comfy-Org/ComfyUI_frontend#XXXX (draft - merge after this)
- Docs: Comfy-Org/docs#XXXX (draft - merge after stable)
* Propagate search_aliases through V3 Schema.get_v1_info to NodeInfoV1
This commit is contained in:
parent 451af70154
commit bdeac8897e
```diff
@@ -1249,6 +1249,7 @@ class NodeInfoV1:
     experimental: bool=None
     api_node: bool=None
     price_badge: dict | None = None
+    search_aliases: list[str]=None

 @dataclass
 class NodeInfoV3:
@@ -1346,6 +1347,8 @@ class Schema:
     hidden: list[Hidden] = field(default_factory=list)
     description: str=""
     """Node description, shown as a tooltip when hovering over the node."""
+    search_aliases: list[str] = field(default_factory=list)
+    """Alternative names for search. Useful for synonyms, abbreviations, or old names after renaming."""
     is_input_list: bool = False
     """A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes.

@@ -1483,6 +1486,7 @@ class Schema:
             api_node=self.is_api_node,
             python_module=getattr(cls, "RELATIVE_PYTHON_MODULE", "nodes"),
             price_badge=self.price_badge.as_dict(self.inputs) if self.price_badge is not None else None,
+            search_aliases=self.search_aliases if self.search_aliases else None,
        )
        return info
```
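
The `get_v1_info` change above serializes an empty alias list as `None` rather than `[]`, so nodes without aliases do not add an empty array to every `/object_info` entry. A minimal standalone sketch of that normalization (`normalize_aliases` is a hypothetical helper, not part of the codebase; it just pulls out the same expression):
```python
# Standalone sketch of the normalization used in get_v1_info above:
# an empty or missing alias list becomes None so /object_info does not
# emit an empty array for every node that defines no aliases.
def normalize_aliases(aliases: list[str] | None) -> list[str] | None:
    return aliases if aliases else None

assert normalize_aliases([]) is None
assert normalize_aliases(None) is None
assert normalize_aliases(["text concat"]) == ["text concat"]
```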
```diff
@@ -550,6 +550,7 @@ class BatchImagesNode(io.ComfyNode):
             node_id="BatchImagesNode",
             display_name="Batch Images",
             category="image",
+            search_aliases=["batch", "image batch", "batch images", "combine images", "merge images", "stack images"],
             inputs=[
                 io.Autogrow.Input("images", template=autogrow_template)
             ],
```
```diff
@@ -16,6 +16,7 @@ class PreviewAny():
     OUTPUT_NODE = True

     CATEGORY = "utils"
+    SEARCH_ALIASES = ["preview", "show", "display", "view", "show text", "display text", "preview text", "show output", "inspect", "debug"]

     def main(self, source=None):
         value = 'None'
```
```diff
@@ -11,6 +11,7 @@ class StringConcatenate(io.ComfyNode):
             node_id="StringConcatenate",
             display_name="Concatenate",
             category="utils/string",
+            search_aliases=["text concat", "join text", "merge text", "combine strings", "concat", "concatenate", "append text", "combine text", "string"],
             inputs=[
                 io.String.Input("string_a", multiline=True),
                 io.String.Input("string_b", multiline=True),
```
```diff
@@ -53,6 +53,7 @@ class ImageUpscaleWithModel(io.ComfyNode):
             node_id="ImageUpscaleWithModel",
             display_name="Upscale Image (using Model)",
             category="image/upscaling",
+            search_aliases=["upscale", "upscaler", "upsc", "enlarge image", "super resolution", "hires", "superres", "increase resolution"],
             inputs=[
                 io.UpscaleModel.Input("upscale_model"),
                 io.Image.Input("image"),
```
nodes.py (15 additions)
```diff
@@ -70,6 +70,7 @@ class CLIPTextEncode(ComfyNodeABC):

     CATEGORY = "conditioning"
     DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
+    SEARCH_ALIASES = ["text", "prompt", "text prompt", "positive prompt", "negative prompt", "encode text", "text encoder", "encode prompt"]

     def encode(self, clip, text):
         if clip is None:
@@ -86,6 +87,7 @@ class ConditioningCombine:
     FUNCTION = "combine"

     CATEGORY = "conditioning"
+    SEARCH_ALIASES = ["combine", "merge conditioning", "combine prompts", "merge prompts", "mix prompts", "add prompt"]

     def combine(self, conditioning_1, conditioning_2):
         return (conditioning_1 + conditioning_2, )
@@ -294,6 +296,7 @@ class VAEDecode:

     CATEGORY = "latent"
     DESCRIPTION = "Decodes latent images back into pixel space images."
+    SEARCH_ALIASES = ["decode", "decode latent", "latent to image", "render latent"]

     def decode(self, vae, samples):
         latent = samples["samples"]
@@ -346,6 +349,7 @@ class VAEEncode:
     FUNCTION = "encode"

     CATEGORY = "latent"
+    SEARCH_ALIASES = ["encode", "encode image", "image to latent"]

     def encode(self, vae, pixels):
         t = vae.encode(pixels)
@@ -581,6 +585,7 @@ class CheckpointLoaderSimple:

     CATEGORY = "loaders"
     DESCRIPTION = "Loads a diffusion model checkpoint, diffusion models are used to denoise latents."
+    SEARCH_ALIASES = ["load model", "checkpoint", "model loader", "load checkpoint", "ckpt", "model"]

     def load_checkpoint(self, ckpt_name):
         ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)
@@ -667,6 +672,7 @@ class LoraLoader:

     CATEGORY = "loaders"
     DESCRIPTION = "LoRAs are used to modify diffusion and CLIP models, altering the way in which latents are denoised such as applying styles. Multiple LoRA nodes can be linked together."
+    SEARCH_ALIASES = ["lora", "load lora", "apply lora", "lora loader", "lora model"]

     def load_lora(self, model, clip, lora_name, strength_model, strength_clip):
         if strength_model == 0 and strength_clip == 0:
@@ -814,6 +820,7 @@ class ControlNetLoader:
     FUNCTION = "load_controlnet"

     CATEGORY = "loaders"
+    SEARCH_ALIASES = ["controlnet", "control net", "cn", "load controlnet", "controlnet loader"]

     def load_controlnet(self, control_net_name):
         controlnet_path = folder_paths.get_full_path_or_raise("controlnet", control_net_name)
@@ -890,6 +897,7 @@ class ControlNetApplyAdvanced:
     FUNCTION = "apply_controlnet"

     CATEGORY = "conditioning/controlnet"
+    SEARCH_ALIASES = ["controlnet", "apply controlnet", "use controlnet", "control net"]

     def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]):
         if strength == 0:
@@ -1200,6 +1208,7 @@ class EmptyLatentImage:

     CATEGORY = "latent"
     DESCRIPTION = "Create a new batch of empty latent images to be denoised via sampling."
+    SEARCH_ALIASES = ["empty", "empty latent", "new latent", "create latent", "blank latent", "blank"]

     def generate(self, width, height, batch_size=1):
         latent = torch.zeros([batch_size, 4, height // 8, width // 8], device=self.device)
@@ -1540,6 +1549,7 @@ class KSampler:

     CATEGORY = "sampling"
     DESCRIPTION = "Uses the provided model, positive and negative conditioning to denoise the latent image."
+    SEARCH_ALIASES = ["sampler", "sample", "generate", "denoise", "diffuse", "txt2img", "img2img"]

     def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=1.0):
         return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
@@ -1604,6 +1614,7 @@ class SaveImage:

     CATEGORY = "image"
     DESCRIPTION = "Saves the input images to your ComfyUI output directory."
+    SEARCH_ALIASES = ["save", "save image", "export image", "output image", "write image", "download"]

     def save_images(self, images, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
         filename_prefix += self.prefix_append
@@ -1640,6 +1651,8 @@ class PreviewImage(SaveImage):
         self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5))
         self.compress_level = 1

+    SEARCH_ALIASES = ["preview", "preview image", "show image", "view image", "display image", "image viewer"]
+
     @classmethod
     def INPUT_TYPES(s):
         return {"required":
@@ -1658,6 +1671,7 @@ class LoadImage:
                 }

     CATEGORY = "image"
+    SEARCH_ALIASES = ["load image", "open image", "import image", "image input", "upload image", "read image", "image loader"]

     RETURN_TYPES = ("IMAGE", "MASK")
     FUNCTION = "load_image"
@@ -1810,6 +1824,7 @@ class ImageScale:
     FUNCTION = "upscale"

     CATEGORY = "image/upscaling"
+    SEARCH_ALIASES = ["resize", "resize image", "scale image", "image resize", "zoom", "zoom in", "change size"]

     def upscale(self, image, upscale_method, width, height, crop):
         if width == 0 and height == 0:
```
```diff
@@ -682,6 +682,8 @@ class PromptServer():

            if hasattr(obj_class, 'API_NODE'):
                info['api_node'] = obj_class.API_NODE
+
+            info['search_aliases'] = getattr(obj_class, 'SEARCH_ALIASES', [])
            return info

        @routes.get("/object_info")
```
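
For V1 nodes the server reads the aliases straight off the class with `getattr`, defaulting to an empty list. Below is a minimal, hypothetical client-side sketch of consuming the new field; it assumes a ComfyUI instance on the default `127.0.0.1:8188`, and its matching logic is illustrative only, not the frontend's actual search ranking:
```python
# Minimal sketch: query /object_info and list nodes whose name, display name,
# or search aliases contain a query string. Assumes a local ComfyUI server on
# the default port; not the frontend's real search implementation.
import json
from urllib.request import urlopen

def find_nodes(query: str, base_url: str = "http://127.0.0.1:8188") -> list[str]:
    with urlopen(f"{base_url}/object_info") as resp:
        object_info = json.load(resp)
    query = query.lower()
    matches = []
    for name, info in object_info.items():
        haystack = [name, info.get("display_name", "")] + (info.get("search_aliases") or [])
        if any(query in term.lower() for term in haystack):
            matches.append(name)
    return matches

if __name__ == "__main__":
    print(find_nodes("text concat"))  # expected to include "StringConcatenate"
```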