Update node categories and display names

This commit is contained in:
Alexis Rolland 2026-05-07 19:55:08 +08:00
parent 6bcd8b96ab
commit 2edcd9a880
16 changed files with 26 additions and 17 deletions

View File

@@ -10,6 +10,7 @@ class AudioEncoderLoader(io.ComfyNode):
def define_schema(cls) -> io.Schema: def define_schema(cls) -> io.Schema:
return io.Schema( return io.Schema(
node_id="AudioEncoderLoader", node_id="AudioEncoderLoader",
display_name="Load Audio Encoder",
category="loaders", category="loaders",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(

View File

@@ -153,7 +153,7 @@ class WanCameraEmbedding(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="WanCameraEmbedding", node_id="WanCameraEmbedding",
category="camera", category="conditioning/video_models",
inputs=[ inputs=[
io.Combo.Input( io.Combo.Input(
"camera_pose", "camera_pose",

View File

@@ -10,7 +10,7 @@ class ContextWindowsManualNode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ContextWindowsManual", node_id="ContextWindowsManual",
display_name="Context Windows (Manual)", display_name="Context Windows (Manual)",
category="context", category="model_patches",
description="Manually set context windows.", description="Manually set context windows.",
inputs=[ inputs=[
io.Model.Input("model", tooltip="The model to apply context windows to during sampling."), io.Model.Input("model", tooltip="The model to apply context windows to during sampling."),

View File

@@ -131,6 +131,8 @@ class HunyuanVideo15SuperResolution(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="HunyuanVideo15SuperResolution", node_id="HunyuanVideo15SuperResolution",
display_name="Hunyuan Video 1.5 Super Resolution",
category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),
@@ -381,6 +383,8 @@ class HunyuanRefinerLatent(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="HunyuanRefinerLatent", node_id="HunyuanRefinerLatent",
display_name="Hunyuan Latent Refiner",
category="conditioning/video_models",
inputs=[ inputs=[
io.Conditioning.Input("positive"), io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"), io.Conditioning.Input("negative"),

View File

@@ -40,7 +40,7 @@ class Hunyuan3Dv2Conditioning(IO.ComfyNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="Hunyuan3Dv2Conditioning", node_id="Hunyuan3Dv2Conditioning",
category="conditioning/video_models", category="conditioning/3d_models",
inputs=[ inputs=[
IO.ClipVisionOutput.Input("clip_vision_output"), IO.ClipVisionOutput.Input("clip_vision_output"),
], ],
@@ -65,7 +65,7 @@ class Hunyuan3Dv2ConditioningMultiView(IO.ComfyNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="Hunyuan3Dv2ConditioningMultiView", node_id="Hunyuan3Dv2ConditioningMultiView",
category="conditioning/video_models", category="conditioning/3d_models",
inputs=[ inputs=[
IO.ClipVisionOutput.Input("front", optional=True), IO.ClipVisionOutput.Input("front", optional=True),
IO.ClipVisionOutput.Input("left", optional=True), IO.ClipVisionOutput.Input("left", optional=True),

View File

@@ -102,6 +102,7 @@ class HypernetworkLoader(IO.ComfyNode):
def define_schema(cls): def define_schema(cls):
return IO.Schema( return IO.Schema(
node_id="HypernetworkLoader", node_id="HypernetworkLoader",
display_name="Load Hypernetwork",
category="loaders", category="loaders",
inputs=[ inputs=[
IO.Model.Input("model"), IO.Model.Input("model"),

View File

@@ -70,7 +70,7 @@ class MathExpressionNode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ComfyMathExpression", node_id="ComfyMathExpression",
display_name="Math Expression", display_name="Math Expression",
category="math", category="logic",
search_aliases=[ search_aliases=[
"expression", "formula", "calculate", "calculator", "expression", "formula", "calculate", "calculator",
"eval", "math", "eval", "math",

View File

@@ -21,7 +21,7 @@ class NumberConvertNode(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="ComfyNumberConvert", node_id="ComfyNumberConvert",
display_name="Number Convert", display_name="Number Convert",
category="math", category="utils",
search_aliases=[ search_aliases=[
"int to float", "float to int", "number convert", "int to float", "float to int", "number convert",
"int2float", "float2int", "cast", "parse number", "int2float", "float2int", "cast", "parse number",

View File

@@ -116,6 +116,7 @@ class Quantize(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ImageQuantize", node_id="ImageQuantize",
display_name="Quantize Image",
category="image/postprocessing", category="image/postprocessing",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
@@ -181,6 +182,7 @@ class Sharpen(io.ComfyNode):
def define_schema(cls): def define_schema(cls):
return io.Schema( return io.Schema(
node_id="ImageSharpen", node_id="ImageSharpen",
display_name="Sharpen Image",
category="image/postprocessing", category="image/postprocessing",
inputs=[ inputs=[
io.Image.Input("image"), io.Image.Input("image"),
@@ -436,7 +438,7 @@ class ResizeImageMaskNode(io.ComfyNode):
node_id="ResizeImageMaskNode", node_id="ResizeImageMaskNode",
display_name="Resize Image/Mask", display_name="Resize Image/Mask",
description="Resize an image or mask using various scaling methods.", description="Resize an image or mask using various scaling methods.",
category="transform", category="image/transform",
search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"], search_aliases=["resize", "resize image", "resize mask", "scale", "scale image", "scale mask", "image resize", "change size", "dimensions", "shrink", "enlarge"],
inputs=[ inputs=[
io.MatchType.Input("input", template=template), io.MatchType.Input("input", template=template),

View File

@@ -15,7 +15,7 @@ class RTDETR_detect(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="RTDETR_detect", node_id="RTDETR_detect",
display_name="RT-DETR Detect", display_name="RT-DETR Detect",
category="detection/", category="detection",
search_aliases=["bbox", "bounding box", "object detection", "coco"], search_aliases=["bbox", "bounding box", "object detection", "coco"],
inputs=[ inputs=[
io.Model.Input("model", display_name="model"), io.Model.Input("model", display_name="model"),
@@ -71,7 +71,7 @@ class DrawBBoxes(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="DrawBBoxes", node_id="DrawBBoxes",
display_name="Draw BBoxes", display_name="Draw BBoxes",
category="detection/", category="detection",
search_aliases=["bbox", "bounding box", "object detection", "rt_detr", "visualize detections", "coco"], search_aliases=["bbox", "bounding box", "object detection", "rt_detr", "visualize detections", "coco"],
inputs=[ inputs=[
io.Image.Input("image", optional=True), io.Image.Input("image", optional=True),

View File

@@ -93,7 +93,7 @@ class SAM3_Detect(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SAM3_Detect", node_id="SAM3_Detect",
display_name="SAM3 Detect", display_name="SAM3 Detect",
category="detection/", category="detection",
search_aliases=["sam3", "segment anything", "open vocabulary", "text detection", "segment"], search_aliases=["sam3", "segment anything", "open vocabulary", "text detection", "segment"],
inputs=[ inputs=[
io.Model.Input("model", display_name="model"), io.Model.Input("model", display_name="model"),
@@ -265,7 +265,7 @@ class SAM3_VideoTrack(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SAM3_VideoTrack", node_id="SAM3_VideoTrack",
display_name="SAM3 Video Track", display_name="SAM3 Video Track",
category="detection/", category="detection",
search_aliases=["sam3", "video", "track", "propagate"], search_aliases=["sam3", "video", "track", "propagate"],
inputs=[ inputs=[
io.Image.Input("images", display_name="images", tooltip="Video frames as batched images"), io.Image.Input("images", display_name="images", tooltip="Video frames as batched images"),
@@ -321,7 +321,7 @@ class SAM3_TrackPreview(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SAM3_TrackPreview", node_id="SAM3_TrackPreview",
display_name="SAM3 Track Preview", display_name="SAM3 Track Preview",
category="detection/", category="detection",
inputs=[ inputs=[
SAM3TrackData.Input("track_data", display_name="track_data"), SAM3TrackData.Input("track_data", display_name="track_data"),
io.Image.Input("images", display_name="images", optional=True), io.Image.Input("images", display_name="images", optional=True),
@@ -475,7 +475,7 @@ class SAM3_TrackToMask(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="SAM3_TrackToMask", node_id="SAM3_TrackToMask",
display_name="SAM3 Track to Mask", display_name="SAM3 Track to Mask",
category="detection/", category="detection",
inputs=[ inputs=[
SAM3TrackData.Input("track_data", display_name="track_data"), SAM3TrackData.Input("track_data", display_name="track_data"),
io.String.Input("object_indices", display_name="object_indices", default="", io.String.Input("object_indices", display_name="object_indices", default="",

View File

@@ -26,7 +26,7 @@ class TextGenerate(io.ComfyNode):
return io.Schema( return io.Schema(
node_id="TextGenerate", node_id="TextGenerate",
category="textgen", category="text",
search_aliases=["LLM", "gemma"], search_aliases=["LLM", "gemma"],
inputs=[ inputs=[
io.Clip.Input("clip"), io.Clip.Input("clip"),

View File

@@ -1361,7 +1361,7 @@ class SaveLoRA(io.ComfyNode):
node_id="SaveLoRA", node_id="SaveLoRA",
search_aliases=["export lora"], search_aliases=["export lora"],
display_name="Save LoRA Weights", display_name="Save LoRA Weights",
category="loaders", category="advanced/model_merging",
is_experimental=True, is_experimental=True,
is_output_node=True, is_output_node=True,
inputs=[ inputs=[

View File

@@ -15,7 +15,7 @@ class ImageOnlyCheckpointLoader:
RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE") RETURN_TYPES = ("MODEL", "CLIP_VISION", "VAE")
FUNCTION = "load_checkpoint" FUNCTION = "load_checkpoint"
CATEGORY = "loaders/video_models" CATEGORY = "loaders"
def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True): def load_checkpoint(self, ckpt_name, output_vae=True, output_clip=True):
ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name) ckpt_path = folder_paths.get_full_path_or_raise("checkpoints", ckpt_name)

View File

@@ -22,7 +22,7 @@ class SaveImageWebsocket:
OUTPUT_NODE = True OUTPUT_NODE = True
CATEGORY = "api/image" CATEGORY = "image"
def save_images(self, images): def save_images(self, images):
pbar = comfy.utils.ProgressBar(images.shape[0]) pbar = comfy.utils.ProgressBar(images.shape[0])

View File

@@ -2092,6 +2092,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"StyleModelLoader": "Load Style Model", "StyleModelLoader": "Load Style Model",
"CLIPVisionLoader": "Load CLIP Vision", "CLIPVisionLoader": "Load CLIP Vision",
"UNETLoader": "Load Diffusion Model", "UNETLoader": "Load Diffusion Model",
"unCLIPCheckpointLoader": "Load unCLIP Checkpoint",
# Conditioning # Conditioning
"CLIPVisionEncode": "CLIP Vision Encode", "CLIPVisionEncode": "CLIP Vision Encode",
"StyleModelApply": "Apply Style Model", "StyleModelApply": "Apply Style Model",