convert nodes_luma.py to V3 schema (#10030)

Alexander Piskun 2025-09-27 12:28:11 +03:00 committed by GitHub
parent 6b4b671ce7
commit bcfd80dd79

comfy_api_nodes/nodes_luma.py

@@ -1,7 +1,8 @@
 from __future__ import annotations
 from inspect import cleandoc
 from typing import Optional
-from comfy.comfy_types.node_typing import IO, ComfyNodeABC
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io as comfy_io
 from comfy_api.input_impl.video_types import VideoFromFile
 from comfy_api_nodes.apis.luma_api import (
     LumaImageModel,
@@ -51,174 +52,186 @@ def image_result_url_extractor(response: LumaGeneration):
 def video_result_url_extractor(response: LumaGeneration):
     return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None


-class LumaReferenceNode(ComfyNodeABC):
+class LumaReferenceNode(comfy_io.ComfyNode):
     """
     Holds an image and weight for use with Luma Generate Image node.
     """

-    RETURN_TYPES = (LumaIO.LUMA_REF,)
-    RETURN_NAMES = ("luma_ref",)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "create_luma_reference"
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaReferenceNode",
+            display_name="Luma Reference",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input(
+                    "image",
+                    tooltip="Image to use as reference.",
+                ),
+                comfy_io.Float.Input(
+                    "weight",
+                    default=1.0,
+                    min=0.0,
+                    max=1.0,
+                    step=0.01,
+                    tooltip="Weight of image reference.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_REF).Input(
+                    "luma_ref",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (
-                    IO.IMAGE,
-                    {
-                        "tooltip": "Image to use as reference.",
-                    },
-                ),
-                "weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 1.0,
-                        "min": 0.0,
-                        "max": 1.0,
-                        "step": 0.01,
-                        "tooltip": "Weight of image reference.",
-                    },
-                ),
-            },
-            "optional": {"luma_ref": (LumaIO.LUMA_REF,)},
-        }
-
-    def create_luma_reference(
-        self, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
-    ):
+    def execute(
+        cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
+    ) -> comfy_io.NodeOutput:
         if luma_ref is not None:
             luma_ref = luma_ref.clone()
         else:
             luma_ref = LumaReferenceChain()
         luma_ref.add(LumaReference(image=image, weight=round(weight, 2)))
-        return (luma_ref,)
+        return comfy_io.NodeOutput(luma_ref)


-class LumaConceptsNode(ComfyNodeABC):
+class LumaConceptsNode(comfy_io.ComfyNode):
     """
     Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.
     """

-    RETURN_TYPES = (LumaIO.LUMA_CONCEPTS,)
-    RETURN_NAMES = ("luma_concepts",)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "create_concepts"
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaConceptsNode",
+            display_name="Luma Concepts",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Combo.Input(
+                    "concept1",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept2",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept3",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept4",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to add to the ones chosen here.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "concept1": (get_luma_concepts(include_none=True),),
-                "concept2": (get_luma_concepts(include_none=True),),
-                "concept3": (get_luma_concepts(include_none=True),),
-                "concept4": (get_luma_concepts(include_none=True),),
-            },
-            "optional": {
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to add to the ones chosen here."
-                    },
-                ),
-            },
-        }
-
-    def create_concepts(
-        self,
+    def execute(
+        cls,
         concept1: str,
         concept2: str,
         concept3: str,
         concept4: str,
         luma_concepts: LumaConceptChain = None,
-    ):
+    ) -> comfy_io.NodeOutput:
         chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4])
         if luma_concepts is not None:
             chain = luma_concepts.clone_and_merge(chain)
-        return (chain,)
+        return comfy_io.NodeOutput(chain)


-class LumaImageGenerationNode(ComfyNodeABC):
+class LumaImageGenerationNode(comfy_io.ComfyNode):
     """
     Generates images synchronously based on prompt and aspect ratio.
     """

-    RETURN_TYPES = (IO.IMAGE,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageNode",
+            display_name="Luma Text to Image",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaImageModel],
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in LumaAspectRatio],
+                    default=LumaAspectRatio.ratio_16_9,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Float.Input(
+                    "style_image_weight",
+                    default=1.0,
+                    min=0.0,
+                    max=1.0,
+                    step=0.01,
+                    tooltip="Weight of style image. Ignored if no style_image provided.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_REF).Input(
+                    "image_luma_ref",
+                    tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "style_image",
+                    tooltip="Style reference image; only 1 image will be used.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "character_image",
+                    tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Image.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the image generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaImageModel],),
-                "aspect_ratio": (
-                    [ratio.value for ratio in LumaAspectRatio],
-                    {
-                        "default": LumaAspectRatio.ratio_16_9,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-                "style_image_weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 1.0,
-                        "min": 0.0,
-                        "max": 1.0,
-                        "step": 0.01,
-                        "tooltip": "Weight of style image. Ignored if no style_image provided.",
-                    },
-                ),
-            },
-            "optional": {
-                "image_luma_ref": (
-                    LumaIO.LUMA_REF,
-                    {
-                        "tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."
-                    },
-                ),
-                "style_image": (
-                    IO.IMAGE,
-                    {"tooltip": "Style reference image; only 1 image will be used."},
-                ),
-                "character_image": (
-                    IO.IMAGE,
-                    {
-                        "tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         aspect_ratio: str,
@@ -227,27 +240,29 @@ class LumaImageGenerationNode(ComfyNodeABC):
         image_luma_ref: LumaReferenceChain = None,
         style_image: torch.Tensor = None,
         character_image: torch.Tensor = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=3)
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         # handle image_luma_ref
         api_image_ref = None
         if image_luma_ref is not None:
-            api_image_ref = await self._convert_luma_refs(
-                image_luma_ref, max_refs=4, auth_kwargs=kwargs,
+            api_image_ref = await cls._convert_luma_refs(
+                image_luma_ref, max_refs=4, auth_kwargs=auth_kwargs,
             )
         # handle style_luma_ref
         api_style_ref = None
         if style_image is not None:
-            api_style_ref = await self._convert_style_image(
-                style_image, weight=style_image_weight, auth_kwargs=kwargs,
+            api_style_ref = await cls._convert_style_image(
+                style_image, weight=style_image_weight, auth_kwargs=auth_kwargs,
             )
         # handle character_ref images
         character_ref = None
         if character_image is not None:
             download_urls = await upload_images_to_comfyapi(
-                character_image, max_images=4, auth_kwargs=kwargs,
+                character_image, max_images=4, auth_kwargs=auth_kwargs,
             )
             character_ref = LumaCharacterRef(
                 identity0=LumaImageIdentity(images=download_urls)
@@ -268,7 +283,7 @@ class LumaImageGenerationNode(ComfyNodeABC):
                 style_ref=api_style_ref,
                 character_ref=character_ref,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()
@@ -283,18 +298,19 @@ class LumaImageGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=image_result_url_extractor,
-            node_id=unique_id,
-            auth_kwargs=kwargs,
+            node_id=cls.hidden.unique_id,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.image) as img_response:
                 img = process_image_response(await img_response.content.read())
-        return (img,)
+        return comfy_io.NodeOutput(img)

+    @classmethod
     async def _convert_luma_refs(
-        self, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None
+        cls, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None
     ):
         luma_urls = []
         ref_count = 0
@@ -308,82 +324,84 @@ class LumaImageGenerationNode(ComfyNodeABC):
                 break
         return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs)

+    @classmethod
     async def _convert_style_image(
-        self, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None
+        cls, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None
     ):
         chain = LumaReferenceChain(
             first_ref=LumaReference(image=style_image, weight=weight)
         )
-        return await self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs)
+        return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs)


-class LumaImageModifyNode(ComfyNodeABC):
+class LumaImageModifyNode(comfy_io.ComfyNode):
     """
     Modifies images synchronously based on prompt and aspect ratio.
     """

-    RETURN_TYPES = (IO.IMAGE,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageModifyNode",
+            display_name="Luma Image to Image",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input(
+                    "image",
+                ),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation",
+                ),
+                comfy_io.Float.Input(
+                    "image_weight",
+                    default=0.1,
+                    min=0.0,
+                    max=0.98,
+                    step=0.01,
+                    tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaImageModel],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+            ],
+            outputs=[comfy_io.Image.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the image generation",
-                    },
-                ),
-                "image_weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 0.1,
-                        "min": 0.0,
-                        "max": 0.98,
-                        "step": 0.01,
-                        "tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.",
-                    },
-                ),
-                "model": ([model.value for model in LumaImageModel],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {},
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         image: torch.Tensor,
         image_weight: float,
         seed,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         # first, upload image
         download_urls = await upload_images_to_comfyapi(
-            image, max_images=1, auth_kwargs=kwargs,
+            image, max_images=1, auth_kwargs=auth_kwargs,
         )
         image_url = download_urls[0]
         # next, make Luma call with download url provided
@@ -401,7 +419,7 @@ class LumaImageModifyNode(ComfyNodeABC):
                     url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2)
                 ),
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()
@@ -416,88 +434,84 @@ class LumaImageModifyNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=image_result_url_extractor,
-            node_id=unique_id,
-            auth_kwargs=kwargs,
+            node_id=cls.hidden.unique_id,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.image) as img_response:
                 img = process_image_response(await img_response.content.read())
-        return (img,)
+        return comfy_io.NodeOutput(img)


-class LumaTextToVideoGenerationNode(ComfyNodeABC):
+class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
     """
     Generates videos synchronously based on prompt and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaVideoNode",
+            display_name="Luma Text to Video",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaVideoModel],
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in LumaAspectRatio],
+                    default=LumaAspectRatio.ratio_16_9,
+                ),
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    default=LumaVideoOutputResolution.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration",
+                    options=[dur.value for dur in LumaVideoModelOutputDuration],
+                ),
+                comfy_io.Boolean.Input(
+                    "loop",
+                    default=False,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
+                    optional=True,
+                )
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaVideoModel],),
-                "aspect_ratio": (
-                    [ratio.value for ratio in LumaAspectRatio],
-                    {
-                        "default": LumaAspectRatio.ratio_16_9,
-                    },
-                ),
-                "resolution": (
-                    [resolution.value for resolution in LumaVideoOutputResolution],
-                    {
-                        "default": LumaVideoOutputResolution.res_540p,
-                    },
-                ),
-                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
-                "loop": (
-                    IO.BOOLEAN,
-                    {
-                        "default": False,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         aspect_ratio: str,
@@ -506,13 +520,15 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
         loop: bool,
         seed,
         luma_concepts: LumaConceptChain = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=False, min_length=3)
         duration = duration if model != LumaVideoModel.ray_1_6 else None
         resolution = resolution if model != LumaVideoModel.ray_1_6 else None
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }

         operation = SynchronousOperation(
             endpoint=ApiEndpoint(
                 path="/proxy/luma/generations",
@@ -529,12 +545,12 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
                 loop=loop,
                 concepts=luma_concepts.create_api_model() if luma_concepts else None,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()

-        if unique_id:
-            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id)
+        if cls.hidden.unique_id:
+            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id)

         operation = PollingOperation(
             poll_endpoint=ApiEndpoint(
@@ -547,90 +563,94 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=video_result_url_extractor,
-            node_id=unique_id,
+            node_id=cls.hidden.unique_id,
             estimated_duration=LUMA_T2V_AVERAGE_DURATION,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.video) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class LumaImageToVideoGenerationNode(ComfyNodeABC):
+class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
     """
     Generates videos synchronously based on prompt, input images, and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageToVideoNode",
+            display_name="Luma Image to Video",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaVideoModel],
+                ),
+                # comfy_io.Combo.Input(
+                #     "aspect_ratio",
+                #     options=[ratio.value for ratio in LumaAspectRatio],
+                #     default=LumaAspectRatio.ratio_16_9,
+                # ),
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    default=LumaVideoOutputResolution.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration",
+                    options=[dur.value for dur in LumaVideoModelOutputDuration],
+                ),
+                comfy_io.Boolean.Input(
+                    "loop",
+                    default=False,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Image.Input(
+                    "first_image",
+                    tooltip="First frame of generated video.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "last_image",
+                    tooltip="Last frame of generated video.",
+                    optional=True,
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
+                    optional=True,
+                )
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaVideoModel],),
-                # "aspect_ratio": ([ratio.value for ratio in LumaAspectRatio], {
-                #     "default": LumaAspectRatio.ratio_16_9,
-                # }),
-                "resolution": (
-                    [resolution.value for resolution in LumaVideoOutputResolution],
-                    {
-                        "default": LumaVideoOutputResolution.res_540p,
-                    },
-                ),
-                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
-                "loop": (
-                    IO.BOOLEAN,
-                    {
-                        "default": False,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {
-                "first_image": (
-                    IO.IMAGE,
-                    {"tooltip": "First frame of generated video."},
-                ),
-                "last_image": (IO.IMAGE, {"tooltip": "Last frame of generated video."}),
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }
-
-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         resolution: str,
@@ -640,14 +660,16 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
         first_image: torch.Tensor = None,
         last_image: torch.Tensor = None,
         luma_concepts: LumaConceptChain = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         if first_image is None and last_image is None:
             raise Exception(
                 "At least one of first_image and last_image requires an input."
             )
-        keyframes = await self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs)
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        keyframes = await cls._convert_to_keyframes(first_image, last_image, auth_kwargs=auth_kwargs)
         duration = duration if model != LumaVideoModel.ray_1_6 else None
         resolution = resolution if model != LumaVideoModel.ray_1_6 else None
@@ -668,12 +690,12 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
                 keyframes=keyframes,
                 concepts=luma_concepts.create_api_model() if luma_concepts else None,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
        )
         response_api: LumaGeneration = await operation.execute()

-        if unique_id:
-            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id)
+        if cls.hidden.unique_id:
+            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id)

         operation = PollingOperation(
             poll_endpoint=ApiEndpoint(
@@ -686,18 +708,19 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=video_result_url_extractor,
-            node_id=unique_id,
+            node_id=cls.hidden.unique_id,
             estimated_duration=LUMA_I2V_AVERAGE_DURATION,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.video) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))

+    @classmethod
     async def _convert_to_keyframes(
-        self,
+        cls,
         first_image: torch.Tensor = None,
         last_image: torch.Tensor = None,
         auth_kwargs: Optional[dict[str,str]] = None,
@@ -719,23 +742,18 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
         return LumaKeyframes(frame0=frame0, frame1=frame1)


-# A dictionary that contains all nodes you want to export with their names
-# NOTE: names should be globally unique
-NODE_CLASS_MAPPINGS = {
-    "LumaImageNode": LumaImageGenerationNode,
-    "LumaImageModifyNode": LumaImageModifyNode,
-    "LumaVideoNode": LumaTextToVideoGenerationNode,
-    "LumaImageToVideoNode": LumaImageToVideoGenerationNode,
-    "LumaReferenceNode": LumaReferenceNode,
-    "LumaConceptsNode": LumaConceptsNode,
-}
-
-# A dictionary that contains the friendly/humanly readable titles for the nodes
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "LumaImageNode": "Luma Text to Image",
-    "LumaImageModifyNode": "Luma Image to Image",
-    "LumaVideoNode": "Luma Text to Video",
-    "LumaImageToVideoNode": "Luma Image to Video",
-    "LumaReferenceNode": "Luma Reference",
-    "LumaConceptsNode": "Luma Concepts",
-}
+class LumaExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            LumaImageGenerationNode,
+            LumaImageModifyNode,
+            LumaTextToVideoGenerationNode,
+            LumaImageToVideoGenerationNode,
+            LumaReferenceNode,
+            LumaConceptsNode,
+        ]
+
+
+async def comfy_entrypoint() -> LumaExtension:
+    return LumaExtension()
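
For orientation, the V3 pattern this commit adopts can be sketched roughly as follows. This is a minimal illustration, not part of the commit: ExampleNode, ExampleExtension, and the "example" category are made-up names, and the calls mirror what appears in the diff above (comfy_io.Schema, comfy_io.NodeOutput, ComfyExtension.get_node_list, comfy_entrypoint); comfy_io.String.Output is assumed by analogy with the Image and Video outputs used there.

from typing_extensions import override
from comfy_api.latest import ComfyExtension, io as comfy_io


class ExampleNode(comfy_io.ComfyNode):
    """Minimal V3-style node: the schema is declared in define_schema, the work happens in execute."""

    @classmethod
    def define_schema(cls) -> comfy_io.Schema:
        return comfy_io.Schema(
            node_id="ExampleNode",           # replaces the NODE_CLASS_MAPPINGS key
            display_name="Example",          # replaces the NODE_DISPLAY_NAME_MAPPINGS entry
            category="api node/example",
            inputs=[comfy_io.String.Input("prompt", multiline=True, default="")],
            outputs=[comfy_io.String.Output()],  # assumed by analogy with Image/Video outputs
        )

    @classmethod
    def execute(cls, prompt: str) -> comfy_io.NodeOutput:
        # Results are wrapped in NodeOutput instead of being returned as a tuple.
        return comfy_io.NodeOutput(prompt.upper())


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
        # Replaces the module-level NODE_CLASS_MAPPINGS dictionary.
        return [ExampleNode]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()

In short, the schema object absorbs the old INPUT_TYPES/RETURN_TYPES/CATEGORY class attributes, hidden inputs such as the auth token and unique_id are accessed via cls.hidden, and the extension's get_node_list plus comfy_entrypoint replace the two export dictionaries removed at the end of the diff.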