convert nodes_upscale_model.py to V3 schema (#10149)

Alexander Piskun 2025-10-10 02:08:40 +03:00 committed by GitHub
parent 90853fb9cd
commit 81e4dac107
2 changed files with 51 additions and 27 deletions

comfy_extras/nodes_upscale_model.py

@@ -4,6 +4,8 @@ from comfy import model_management
 import torch
 import comfy.utils
 import folder_paths
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io
 
 try:
     from spandrel_extra_arches import EXTRA_REGISTRY
@@ -13,17 +15,23 @@ try:
 except:
     pass
 
-class UpscaleModelLoader:
+class UpscaleModelLoader(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model_name": (folder_paths.get_filename_list("upscale_models"), ),
-                             }}
-    RETURN_TYPES = ("UPSCALE_MODEL",)
-    FUNCTION = "load_model"
-
-    CATEGORY = "loaders"
-
-    def load_model(self, model_name):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="UpscaleModelLoader",
+            display_name="Load Upscale Model",
+            category="loaders",
+            inputs=[
+                io.Combo.Input("model_name", options=folder_paths.get_filename_list("upscale_models")),
+            ],
+            outputs=[
+                io.UpscaleModel.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, model_name) -> io.NodeOutput:
         model_path = folder_paths.get_full_path_or_raise("upscale_models", model_name)
         sd = comfy.utils.load_torch_file(model_path, safe_load=True)
         if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
@@ -33,21 +41,29 @@ class UpscaleModelLoader:
         if not isinstance(out, ImageModelDescriptor):
             raise Exception("Upscale model must be a single-image model.")
 
-        return (out, )
+        return io.NodeOutput(out)
+
+    load_model = execute  # TODO: remove
 
-class ImageUpscaleWithModel:
+class ImageUpscaleWithModel(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "upscale_model": ("UPSCALE_MODEL",),
-                              "image": ("IMAGE",),
-                              }}
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "upscale"
-
-    CATEGORY = "image/upscaling"
-
-    def upscale(self, upscale_model, image):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageUpscaleWithModel",
+            display_name="Upscale Image (using Model)",
+            category="image/upscaling",
+            inputs=[
+                io.UpscaleModel.Input("upscale_model"),
+                io.Image.Input("image"),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, upscale_model, image) -> io.NodeOutput:
         device = model_management.get_torch_device()
 
         memory_required = model_management.module_size(upscale_model.model)
@@ -75,9 +91,19 @@ class ImageUpscaleWithModel:
         upscale_model.to("cpu")
         s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0)
-        return (s,)
+        return io.NodeOutput(s)
 
-NODE_CLASS_MAPPINGS = {
-    "UpscaleModelLoader": UpscaleModelLoader,
-    "ImageUpscaleWithModel": ImageUpscaleWithModel
-}
+    upscale = execute  # TODO: remove
+
+
+class UpscaleModelExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            UpscaleModelLoader,
+            ImageUpscaleWithModel,
+        ]
+
+
+async def comfy_entrypoint() -> UpscaleModelExtension:
+    return UpscaleModelExtension()
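
For reference, the V3 pattern applied above generalizes: a node subclasses io.ComfyNode, declares its node_id, display_name, category, inputs and outputs in define_schema(), does its work in a classmethod execute() that returns io.NodeOutput, and is registered through a ComfyExtension whose get_node_list() is returned by an async comfy_entrypoint(). The following is a minimal sketch of the same surface for a hypothetical extension; the node name and its one-line body are illustrative only (not part of this commit), and it assumes exactly the comfy_api.latest API used in the diff above.

from typing_extensions import override
from comfy_api.latest import ComfyExtension, io


class InvertImageExample(io.ComfyNode):
    # Hypothetical node, shown only to illustrate the V3 layout.
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="InvertImageExample",
            display_name="Invert Image (example)",
            category="image/example",
            inputs=[
                io.Image.Input("image"),
            ],
            outputs=[
                io.Image.Output(),
            ],
        )

    @classmethod
    def execute(cls, image) -> io.NodeOutput:
        # IMAGE tensors are float BHWC in [0, 1], so inversion is a simple subtraction.
        return io.NodeOutput(1.0 - image)


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [InvertImageExample]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()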

nodes.py

@@ -2027,7 +2027,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "DiffControlNetLoader": "Load ControlNet Model (diff)",
     "StyleModelLoader": "Load Style Model",
     "CLIPVisionLoader": "Load CLIP Vision",
-    "UpscaleModelLoader": "Load Upscale Model",
     "UNETLoader": "Load Diffusion Model",
     # Conditioning
     "CLIPVisionEncode": "CLIP Vision Encode",
@@ -2065,7 +2064,6 @@ NODE_DISPLAY_NAME_MAPPINGS = {
     "LoadImageOutput": "Load Image (from Outputs)",
     "ImageScale": "Upscale Image",
     "ImageScaleBy": "Upscale Image By",
-    "ImageUpscaleWithModel": "Upscale Image (using Model)",
     "ImageInvert": "Invert Image",
     "ImagePadForOutpaint": "Pad Image for Outpainting",
     "ImageBatch": "Batch Images",