Rewrite multigpu nodes to V3 format

Amp-Thread-ID: https://ampcode.com/threads/T-019d3ee9-19d5-767a-9d7a-e50cbbef815b
Co-authored-by: Amp <amp@ampcode.com>
This commit is contained in:
Jedrzej Kosinski 2026-03-30 07:23:13 -07:00
parent 84f465e791
commit d52dcbc88f

View File

@ -1,13 +1,17 @@
from __future__ import annotations from __future__ import annotations
from inspect import cleandoc
from inspect import cleandoc
from typing import TYPE_CHECKING from typing import TYPE_CHECKING
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
if TYPE_CHECKING: if TYPE_CHECKING:
from comfy.model_patcher import ModelPatcher from comfy.model_patcher import ModelPatcher
import comfy.multigpu import comfy.multigpu
class MultiGPUWorkUnitsNode: class MultiGPUWorkUnitsNode(io.ComfyNode):
""" """
Prepares model to have sampling accelerated via splitting work units. Prepares model to have sampling accelerated via splitting work units.
@ -16,54 +20,53 @@ class MultiGPUWorkUnitsNode:
Other than those exceptions, this node can be placed in any order. Other than those exceptions, this node can be placed in any order.
""" """
NodeId = "MultiGPU_WorkUnits"
NodeName = "MultiGPU Work Units"
@classmethod @classmethod
def INPUT_TYPES(cls): def define_schema(cls):
return { return io.Schema(
"required": { node_id="MultiGPU_WorkUnits",
"model": ("MODEL",), display_name="MultiGPU Work Units",
"max_gpus" : ("INT", {"default": 8, "min": 1, "step": 1}), category="advanced/multigpu",
}, description=cleandoc(cls.__doc__),
"optional": { inputs=[
"gpu_options": ("GPU_OPTIONS",) io.Model.Input("model"),
} io.Int.Input("max_gpus", default=8, min=1, step=1),
} io.Custom("GPU_OPTIONS").Input("gpu_options", optional=True),
],
outputs=[
io.Model.Output(),
],
)
RETURN_TYPES = ("MODEL",) @classmethod
FUNCTION = "init_multigpu" def execute(cls, model: ModelPatcher, max_gpus: int, gpu_options: comfy.multigpu.GPUOptionsGroup = None) -> io.NodeOutput:
CATEGORY = "advanced/multigpu"
DESCRIPTION = cleandoc(__doc__)
def init_multigpu(self, model: ModelPatcher, max_gpus: int, gpu_options: comfy.multigpu.GPUOptionsGroup=None):
model = comfy.multigpu.create_multigpu_deepclones(model, max_gpus, gpu_options, reuse_loaded=True) model = comfy.multigpu.create_multigpu_deepclones(model, max_gpus, gpu_options, reuse_loaded=True)
return (model,) return io.NodeOutput(model)
class MultiGPUOptionsNode(io.ComfyNode):
    """
    Select the relative speed of GPUs in the special case they have significantly different performance from one another.
    """

    @classmethod
    def define_schema(cls):
        """Declare the V3 schema: per-device index/speed inputs, GPU_OPTIONS output.

        The optional gpu_options input allows chaining several of these nodes to
        describe multiple devices in one GPU_OPTIONS group.
        """
        return io.Schema(
            node_id="MultiGPU_Options",
            display_name="MultiGPU Options",
            category="advanced/multigpu",
            # Reuse the class docstring as the node description shown in the UI.
            description=cleandoc(cls.__doc__),
            inputs=[
                io.Int.Input("device_index", default=0, min=0, max=64),
                io.Float.Input("relative_speed", default=1.0, min=0.0, step=0.01),
                io.Custom("GPU_OPTIONS").Input("gpu_options", optional=True),
            ],
            outputs=[
                io.Custom("GPU_OPTIONS").Output(),
            ],
        )

    @classmethod
    def execute(cls, device_index: int, relative_speed: float, gpu_options: comfy.multigpu.GPUOptionsGroup | None = None) -> io.NodeOutput:
        """Add a GPUOptions(device_index, relative_speed) entry to a copy of gpu_options.

        Returns a GPU_OPTIONS group; a fresh group is created when none is supplied.
        """
        if not gpu_options:
            gpu_options = comfy.multigpu.GPUOptionsGroup()
        # BUG FIX: the original called gpu_options.clone() and discarded the result,
        # so add() below mutated the group object passed in by the upstream node.
        # Rebind to the clone so chained option nodes do not share mutable state.
        gpu_options = gpu_options.clone()

        opt = comfy.multigpu.GPUOptions(device_index=device_index, relative_speed=relative_speed)
        gpu_options.add(opt)
        return io.NodeOutput(gpu_options)
class MultiGPUExtension(ComfyExtension):
    """V3 extension entry: registers the multigpu nodes with ComfyUI.

    Replaces the legacy NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dicts;
    node ids and display names now live in each node's define_schema().
    """

    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [
            MultiGPUWorkUnitsNode,
            MultiGPUOptionsNode,
        ]


async def comfy_entrypoint() -> MultiGPUExtension:
    # Called by ComfyUI to obtain this module's extension instance.
    return MultiGPUExtension()