Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-17 10:02:59 +08:00)

commit bcfd80dd79 (parent 6b4b671ce7)
convert nodes_luma.py to V3 schema (#10030)
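This diff moves each Luma node from the V1 class-attribute style (INPUT_TYPES, RETURN_TYPES, FUNCTION, CATEGORY, hidden values passed in through **kwargs) to the V3 schema style: a classmethod define_schema() returning a comfy_io.Schema, a classmethod execute() returning a comfy_io.NodeOutput, hidden values read from cls.hidden, and registration through a ComfyExtension plus comfy_entrypoint() instead of NODE_CLASS_MAPPINGS. As orientation, here is a minimal sketch of that V3 pattern assembled only from constructs visible in this diff; the node, its input/output, and the extension name are hypothetical and not part of the commit.

from typing_extensions import override

from comfy_api.latest import ComfyExtension, io as comfy_io


class ExamplePassThroughNode(comfy_io.ComfyNode):  # hypothetical node, for illustration only
    """Returns its input image unchanged."""

    @classmethod
    def define_schema(cls) -> comfy_io.Schema:
        # All node metadata lives in one Schema object instead of
        # INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY class attributes.
        return comfy_io.Schema(
            node_id="ExamplePassThroughNode",
            display_name="Example Pass-Through",
            category="api node/example",
            inputs=[comfy_io.Image.Input("image", tooltip="Image to pass through.")],
            outputs=[comfy_io.Image.Output()],
            hidden=[comfy_io.Hidden.unique_id],
        )

    @classmethod
    def execute(cls, image) -> comfy_io.NodeOutput:
        # Hidden values are read from cls.hidden; results are wrapped in
        # NodeOutput instead of being returned as a bare tuple.
        return comfy_io.NodeOutput(image)


class ExampleExtension(ComfyExtension):  # replaces the NODE_CLASS_MAPPINGS dictionaries
    @override
    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
        return [ExamplePassThroughNode]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()
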
@@ -1,7 +1,8 @@
 from __future__ import annotations
 from inspect import cleandoc
 from typing import Optional
-from comfy.comfy_types.node_typing import IO, ComfyNodeABC
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io as comfy_io
 from comfy_api.input_impl.video_types import VideoFromFile
 from comfy_api_nodes.apis.luma_api import (
     LumaImageModel,
@@ -51,174 +52,186 @@ def image_result_url_extractor(response: LumaGeneration):
 def video_result_url_extractor(response: LumaGeneration):
     return response.assets.video if hasattr(response, "assets") and hasattr(response.assets, "video") else None

-class LumaReferenceNode(ComfyNodeABC):
+class LumaReferenceNode(comfy_io.ComfyNode):
     """
     Holds an image and weight for use with Luma Generate Image node.
     """

-    RETURN_TYPES = (LumaIO.LUMA_REF,)
-    RETURN_NAMES = ("luma_ref",)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "create_luma_reference"
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaReferenceNode",
+            display_name="Luma Reference",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input(
+                    "image",
+                    tooltip="Image to use as reference.",
+                ),
+                comfy_io.Float.Input(
+                    "weight",
+                    default=1.0,
+                    min=0.0,
+                    max=1.0,
+                    step=0.01,
+                    tooltip="Weight of image reference.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_REF).Input(
+                    "luma_ref",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Custom(LumaIO.LUMA_REF).Output(display_name="luma_ref")],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (
-                    IO.IMAGE,
-                    {
-                        "tooltip": "Image to use as reference.",
-                    },
-                ),
-                "weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 1.0,
-                        "min": 0.0,
-                        "max": 1.0,
-                        "step": 0.01,
-                        "tooltip": "Weight of image reference.",
-                    },
-                ),
-            },
-            "optional": {"luma_ref": (LumaIO.LUMA_REF,)},
-        }

-    def create_luma_reference(
-        self, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
-    ):
+    def execute(
+        cls, image: torch.Tensor, weight: float, luma_ref: LumaReferenceChain = None
+    ) -> comfy_io.NodeOutput:
         if luma_ref is not None:
             luma_ref = luma_ref.clone()
         else:
             luma_ref = LumaReferenceChain()
         luma_ref.add(LumaReference(image=image, weight=round(weight, 2)))
-        return (luma_ref,)
+        return comfy_io.NodeOutput(luma_ref)


-class LumaConceptsNode(ComfyNodeABC):
+class LumaConceptsNode(comfy_io.ComfyNode):
     """
     Holds one or more Camera Concepts for use with Luma Text to Video and Luma Image to Video nodes.
     """

-    RETURN_TYPES = (LumaIO.LUMA_CONCEPTS,)
-    RETURN_NAMES = ("luma_concepts",)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "create_concepts"
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaConceptsNode",
+            display_name="Luma Concepts",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Combo.Input(
+                    "concept1",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept2",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept3",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Combo.Input(
+                    "concept4",
+                    options=get_luma_concepts(include_none=True),
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to add to the ones chosen here.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Output(display_name="luma_concepts")],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "concept1": (get_luma_concepts(include_none=True),),
-                "concept2": (get_luma_concepts(include_none=True),),
-                "concept3": (get_luma_concepts(include_none=True),),
-                "concept4": (get_luma_concepts(include_none=True),),
-            },
-            "optional": {
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to add to the ones chosen here."
-                    },
-                ),
-            },
-        }

-    def create_concepts(
-        self,
+    def execute(
+        cls,
         concept1: str,
         concept2: str,
         concept3: str,
         concept4: str,
         luma_concepts: LumaConceptChain = None,
-    ):
+    ) -> comfy_io.NodeOutput:
         chain = LumaConceptChain(str_list=[concept1, concept2, concept3, concept4])
         if luma_concepts is not None:
             chain = luma_concepts.clone_and_merge(chain)
-        return (chain,)
+        return comfy_io.NodeOutput(chain)


-class LumaImageGenerationNode(ComfyNodeABC):
+class LumaImageGenerationNode(comfy_io.ComfyNode):
     """
     Generates images synchronously based on prompt and aspect ratio.
     """

-    RETURN_TYPES = (IO.IMAGE,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageNode",
+            display_name="Luma Text to Image",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaImageModel],
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in LumaAspectRatio],
+                    default=LumaAspectRatio.ratio_16_9,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Float.Input(
+                    "style_image_weight",
+                    default=1.0,
+                    min=0.0,
+                    max=1.0,
+                    step=0.01,
+                    tooltip="Weight of style image. Ignored if no style_image provided.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_REF).Input(
+                    "image_luma_ref",
+                    tooltip="Luma Reference node connection to influence generation with input images; up to 4 images can be considered.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "style_image",
+                    tooltip="Style reference image; only 1 image will be used.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "character_image",
+                    tooltip="Character reference images; can be a batch of multiple, up to 4 images can be considered.",
+                    optional=True,
+                ),
+            ],
+            outputs=[comfy_io.Image.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the image generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaImageModel],),
-                "aspect_ratio": (
-                    [ratio.value for ratio in LumaAspectRatio],
-                    {
-                        "default": LumaAspectRatio.ratio_16_9,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-                "style_image_weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 1.0,
-                        "min": 0.0,
-                        "max": 1.0,
-                        "step": 0.01,
-                        "tooltip": "Weight of style image. Ignored if no style_image provided.",
-                    },
-                ),
-            },
-            "optional": {
-                "image_luma_ref": (
-                    LumaIO.LUMA_REF,
-                    {
-                        "tooltip": "Luma Reference node connection to influence generation with input images; up to 4 images can be considered."
-                    },
-                ),
-                "style_image": (
-                    IO.IMAGE,
-                    {"tooltip": "Style reference image; only 1 image will be used."},
-                ),
-                "character_image": (
-                    IO.IMAGE,
-                    {
-                        "tooltip": "Character reference images; can be a batch of multiple, up to 4 images can be considered."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }

-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         aspect_ratio: str,
@@ -227,27 +240,29 @@ class LumaImageGenerationNode(ComfyNodeABC):
         image_luma_ref: LumaReferenceChain = None,
         style_image: torch.Tensor = None,
         character_image: torch.Tensor = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=True, min_length=3)
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         # handle image_luma_ref
         api_image_ref = None
         if image_luma_ref is not None:
-            api_image_ref = await self._convert_luma_refs(
-                image_luma_ref, max_refs=4, auth_kwargs=kwargs,
+            api_image_ref = await cls._convert_luma_refs(
+                image_luma_ref, max_refs=4, auth_kwargs=auth_kwargs,
             )
         # handle style_luma_ref
         api_style_ref = None
         if style_image is not None:
-            api_style_ref = await self._convert_style_image(
-                style_image, weight=style_image_weight, auth_kwargs=kwargs,
+            api_style_ref = await cls._convert_style_image(
+                style_image, weight=style_image_weight, auth_kwargs=auth_kwargs,
             )
         # handle character_ref images
         character_ref = None
         if character_image is not None:
             download_urls = await upload_images_to_comfyapi(
-                character_image, max_images=4, auth_kwargs=kwargs,
+                character_image, max_images=4, auth_kwargs=auth_kwargs,
             )
             character_ref = LumaCharacterRef(
                 identity0=LumaImageIdentity(images=download_urls)
@@ -268,7 +283,7 @@ class LumaImageGenerationNode(ComfyNodeABC):
                 style_ref=api_style_ref,
                 character_ref=character_ref,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()

@@ -283,18 +298,19 @@ class LumaImageGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=image_result_url_extractor,
-            node_id=unique_id,
-            auth_kwargs=kwargs,
+            node_id=cls.hidden.unique_id,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.image) as img_response:
                 img = process_image_response(await img_response.content.read())
-        return (img,)
+        return comfy_io.NodeOutput(img)

+    @classmethod
     async def _convert_luma_refs(
-        self, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None
+        cls, luma_ref: LumaReferenceChain, max_refs: int, auth_kwargs: Optional[dict[str,str]] = None
     ):
         luma_urls = []
         ref_count = 0
@@ -308,82 +324,84 @@ class LumaImageGenerationNode(ComfyNodeABC):
                 break
         return luma_ref.create_api_model(download_urls=luma_urls, max_refs=max_refs)

+    @classmethod
     async def _convert_style_image(
-        self, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None
+        cls, style_image: torch.Tensor, weight: float, auth_kwargs: Optional[dict[str,str]] = None
     ):
         chain = LumaReferenceChain(
             first_ref=LumaReference(image=style_image, weight=weight)
         )
-        return await self._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs)
+        return await cls._convert_luma_refs(chain, max_refs=1, auth_kwargs=auth_kwargs)


-class LumaImageModifyNode(ComfyNodeABC):
+class LumaImageModifyNode(comfy_io.ComfyNode):
     """
     Modifies images synchronously based on prompt and aspect ratio.
     """

-    RETURN_TYPES = (IO.IMAGE,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/image/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageModifyNode",
+            display_name="Luma Image to Image",
+            category="api node/image/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.Image.Input(
+                    "image",
+                ),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation",
+                ),
+                comfy_io.Float.Input(
+                    "image_weight",
+                    default=0.1,
+                    min=0.0,
+                    max=0.98,
+                    step=0.01,
+                    tooltip="Weight of the image; the closer to 1.0, the less the image will be modified.",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaImageModel],
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+            ],
+            outputs=[comfy_io.Image.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": (IO.IMAGE,),
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the image generation",
-                    },
-                ),
-                "image_weight": (
-                    IO.FLOAT,
-                    {
-                        "default": 0.1,
-                        "min": 0.0,
-                        "max": 0.98,
-                        "step": 0.01,
-                        "tooltip": "Weight of the image; the closer to 1.0, the less the image will be modified.",
-                    },
-                ),
-                "model": ([model.value for model in LumaImageModel],),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {},
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }

-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         image: torch.Tensor,
         image_weight: float,
         seed,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         # first, upload image
         download_urls = await upload_images_to_comfyapi(
-            image, max_images=1, auth_kwargs=kwargs,
+            image, max_images=1, auth_kwargs=auth_kwargs,
         )
         image_url = download_urls[0]
         # next, make Luma call with download url provided
@@ -401,7 +419,7 @@ class LumaImageModifyNode(ComfyNodeABC):
                     url=image_url, weight=round(max(min(1.0-image_weight, 0.98), 0.0), 2)
                 ),
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()

@@ -416,88 +434,84 @@ class LumaImageModifyNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=image_result_url_extractor,
-            node_id=unique_id,
-            auth_kwargs=kwargs,
+            node_id=cls.hidden.unique_id,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.image) as img_response:
                 img = process_image_response(await img_response.content.read())
-        return (img,)
+        return comfy_io.NodeOutput(img)


-class LumaTextToVideoGenerationNode(ComfyNodeABC):
+class LumaTextToVideoGenerationNode(comfy_io.ComfyNode):
     """
     Generates videos synchronously based on prompt and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaVideoNode",
+            display_name="Luma Text to Video",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaVideoModel],
+                ),
+                comfy_io.Combo.Input(
+                    "aspect_ratio",
+                    options=[ratio.value for ratio in LumaAspectRatio],
+                    default=LumaAspectRatio.ratio_16_9,
+                ),
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    default=LumaVideoOutputResolution.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration",
+                    options=[dur.value for dur in LumaVideoModelOutputDuration],
+                ),
+                comfy_io.Boolean.Input(
+                    "loop",
+                    default=False,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
+                    optional=True,
+                )
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaVideoModel],),
-                "aspect_ratio": (
-                    [ratio.value for ratio in LumaAspectRatio],
-                    {
-                        "default": LumaAspectRatio.ratio_16_9,
-                    },
-                ),
-                "resolution": (
-                    [resolution.value for resolution in LumaVideoOutputResolution],
-                    {
-                        "default": LumaVideoOutputResolution.res_540p,
-                    },
-                ),
-                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
-                "loop": (
-                    IO.BOOLEAN,
-                    {
-                        "default": False,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }

-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         aspect_ratio: str,
@@ -506,13 +520,15 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
         loop: bool,
         seed,
         luma_concepts: LumaConceptChain = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         validate_string(prompt, strip_whitespace=False, min_length=3)
         duration = duration if model != LumaVideoModel.ray_1_6 else None
         resolution = resolution if model != LumaVideoModel.ray_1_6 else None

+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
         operation = SynchronousOperation(
             endpoint=ApiEndpoint(
                 path="/proxy/luma/generations",
@@ -529,12 +545,12 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
                 loop=loop,
                 concepts=luma_concepts.create_api_model() if luma_concepts else None,
             ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()

-        if unique_id:
-            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id)
+        if cls.hidden.unique_id:
+            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id)

         operation = PollingOperation(
             poll_endpoint=ApiEndpoint(
@@ -547,90 +563,94 @@ class LumaTextToVideoGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=video_result_url_extractor,
-            node_id=unique_id,
+            node_id=cls.hidden.unique_id,
             estimated_duration=LUMA_T2V_AVERAGE_DURATION,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.video) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))


-class LumaImageToVideoGenerationNode(ComfyNodeABC):
+class LumaImageToVideoGenerationNode(comfy_io.ComfyNode):
     """
     Generates videos synchronously based on prompt, input images, and output_size.
     """

-    RETURN_TYPES = (IO.VIDEO,)
-    DESCRIPTION = cleandoc(__doc__ or "")  # Handle potential None value
-    FUNCTION = "api_call"
-    API_NODE = True
-    CATEGORY = "api node/video/Luma"
+    @classmethod
+    def define_schema(cls) -> comfy_io.Schema:
+        return comfy_io.Schema(
+            node_id="LumaImageToVideoNode",
+            display_name="Luma Image to Video",
+            category="api node/video/Luma",
+            description=cleandoc(cls.__doc__ or ""),
+            inputs=[
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the video generation",
+                ),
+                comfy_io.Combo.Input(
+                    "model",
+                    options=[model.value for model in LumaVideoModel],
+                ),
+                # comfy_io.Combo.Input(
+                #     "aspect_ratio",
+                #     options=[ratio.value for ratio in LumaAspectRatio],
+                #     default=LumaAspectRatio.ratio_16_9,
+                # ),
+                comfy_io.Combo.Input(
+                    "resolution",
+                    options=[resolution.value for resolution in LumaVideoOutputResolution],
+                    default=LumaVideoOutputResolution.res_540p,
+                ),
+                comfy_io.Combo.Input(
+                    "duration",
+                    options=[dur.value for dur in LumaVideoModelOutputDuration],
+                ),
+                comfy_io.Boolean.Input(
+                    "loop",
+                    default=False,
+                ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
+                ),
+                comfy_io.Image.Input(
+                    "first_image",
+                    tooltip="First frame of generated video.",
+                    optional=True,
+                ),
+                comfy_io.Image.Input(
+                    "last_image",
+                    tooltip="Last frame of generated video.",
+                    optional=True,
+                ),
+                comfy_io.Custom(LumaIO.LUMA_CONCEPTS).Input(
+                    "luma_concepts",
+                    tooltip="Optional Camera Concepts to dictate camera motion via the Luma Concepts node.",
+                    optional=True,
+                )
+            ],
+            outputs=[comfy_io.Video.Output()],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )

-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "prompt": (
-                    IO.STRING,
-                    {
-                        "multiline": True,
-                        "default": "",
-                        "tooltip": "Prompt for the video generation",
-                    },
-                ),
-                "model": ([model.value for model in LumaVideoModel],),
-                # "aspect_ratio": ([ratio.value for ratio in LumaAspectRatio], {
-                #     "default": LumaAspectRatio.ratio_16_9,
-                # }),
-                "resolution": (
-                    [resolution.value for resolution in LumaVideoOutputResolution],
-                    {
-                        "default": LumaVideoOutputResolution.res_540p,
-                    },
-                ),
-                "duration": ([dur.value for dur in LumaVideoModelOutputDuration],),
-                "loop": (
-                    IO.BOOLEAN,
-                    {
-                        "default": False,
-                    },
-                ),
-                "seed": (
-                    IO.INT,
-                    {
-                        "default": 0,
-                        "min": 0,
-                        "max": 0xFFFFFFFFFFFFFFFF,
-                        "control_after_generate": True,
-                        "tooltip": "Seed to determine if node should re-run; actual results are nondeterministic regardless of seed.",
-                    },
-                ),
-            },
-            "optional": {
-                "first_image": (
-                    IO.IMAGE,
-                    {"tooltip": "First frame of generated video."},
-                ),
-                "last_image": (IO.IMAGE, {"tooltip": "Last frame of generated video."}),
-                "luma_concepts": (
-                    LumaIO.LUMA_CONCEPTS,
-                    {
-                        "tooltip": "Optional Camera Concepts to dictate camera motion via the Luma Concepts node."
-                    },
-                ),
-            },
-            "hidden": {
-                "auth_token": "AUTH_TOKEN_COMFY_ORG",
-                "comfy_api_key": "API_KEY_COMFY_ORG",
-                "unique_id": "UNIQUE_ID",
-            },
-        }

-    async def api_call(
-        self,
+    async def execute(
+        cls,
         prompt: str,
         model: str,
         resolution: str,
@@ -640,14 +660,16 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
         first_image: torch.Tensor = None,
         last_image: torch.Tensor = None,
         luma_concepts: LumaConceptChain = None,
-        unique_id: str = None,
-        **kwargs,
-    ):
+    ) -> comfy_io.NodeOutput:
         if first_image is None and last_image is None:
             raise Exception(
                 "At least one of first_image and last_image requires an input."
             )
-        keyframes = await self._convert_to_keyframes(first_image, last_image, auth_kwargs=kwargs)
+        auth_kwargs = {
+            "auth_token": cls.hidden.auth_token_comfy_org,
+            "comfy_api_key": cls.hidden.api_key_comfy_org,
+        }
+        keyframes = await cls._convert_to_keyframes(first_image, last_image, auth_kwargs=auth_kwargs)
         duration = duration if model != LumaVideoModel.ray_1_6 else None
         resolution = resolution if model != LumaVideoModel.ray_1_6 else None

@@ -668,12 +690,12 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
                 keyframes=keyframes,
                 concepts=luma_concepts.create_api_model() if luma_concepts else None,
            ),
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_api: LumaGeneration = await operation.execute()

-        if unique_id:
-            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", unique_id)
+        if cls.hidden.unique_id:
+            PromptServer.instance.send_progress_text(f"Luma video generation started: {response_api.id}", cls.hidden.unique_id)

         operation = PollingOperation(
             poll_endpoint=ApiEndpoint(
@@ -686,18 +708,19 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
             failed_statuses=[LumaState.failed],
             status_extractor=lambda x: x.state,
             result_url_extractor=video_result_url_extractor,
-            node_id=unique_id,
+            node_id=cls.hidden.unique_id,
             estimated_duration=LUMA_I2V_AVERAGE_DURATION,
-            auth_kwargs=kwargs,
+            auth_kwargs=auth_kwargs,
         )
         response_poll = await operation.execute()

         async with aiohttp.ClientSession() as session:
             async with session.get(response_poll.assets.video) as vid_response:
-                return (VideoFromFile(BytesIO(await vid_response.content.read())),)
+                return comfy_io.NodeOutput(VideoFromFile(BytesIO(await vid_response.content.read())))

+    @classmethod
     async def _convert_to_keyframes(
-        self,
+        cls,
         first_image: torch.Tensor = None,
         last_image: torch.Tensor = None,
         auth_kwargs: Optional[dict[str,str]] = None,
@@ -719,23 +742,18 @@ class LumaImageToVideoGenerationNode(ComfyNodeABC):
         return LumaKeyframes(frame0=frame0, frame1=frame1)


-# A dictionary that contains all nodes you want to export with their names
-# NOTE: names should be globally unique
-NODE_CLASS_MAPPINGS = {
-    "LumaImageNode": LumaImageGenerationNode,
-    "LumaImageModifyNode": LumaImageModifyNode,
-    "LumaVideoNode": LumaTextToVideoGenerationNode,
-    "LumaImageToVideoNode": LumaImageToVideoGenerationNode,
-    "LumaReferenceNode": LumaReferenceNode,
-    "LumaConceptsNode": LumaConceptsNode,
-}
+class LumaExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
+        return [
+            LumaImageGenerationNode,
+            LumaImageModifyNode,
+            LumaTextToVideoGenerationNode,
+            LumaImageToVideoGenerationNode,
+            LumaReferenceNode,
+            LumaConceptsNode,
+        ]

-# A dictionary that contains the friendly/humanly readable titles for the nodes
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "LumaImageNode": "Luma Text to Image",
-    "LumaImageModifyNode": "Luma Image to Image",
-    "LumaVideoNode": "Luma Text to Video",
-    "LumaImageToVideoNode": "Luma Image to Video",
-    "LumaReferenceNode": "Luma Reference",
-    "LumaConceptsNode": "Luma Concepts",
-}
+async def comfy_entrypoint() -> LumaExtension:
+    return LumaExtension()