Mirror of https://github.com/comfyanonymous/ComfyUI.git

Commit 2862921cca: Merge branch 'comfyanonymous:master' into master
@@ -1277,6 +1277,7 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
     phi1_fn = lambda t: torch.expm1(t) / t
     phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t
 
+    old_sigma_down = None
     old_denoised = None
     uncond_denoised = None
     def post_cfg_function(args):
@@ -1304,9 +1305,9 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
             x = x + d * dt
         else:
             # Second order multistep method in https://arxiv.org/pdf/2308.02157
-            t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
+            t, t_old, t_next, t_prev = t_fn(sigmas[i]), t_fn(old_sigma_down), t_fn(sigma_down), t_fn(sigmas[i - 1])
             h = t_next - t
-            c2 = (t_prev - t) / h
+            c2 = (t_prev - t_old) / h
 
             phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
             b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
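Gloss (mine, not part of the commit): t_fn(sigma) = -log(sigma), and phi1/phi2 are the standard exponential-integrator weight functions, so b1 and b2 weight the current and previous denoised predictions in the second-order update. Before this fix, c2 measured the history point's offset from t = t_fn(sigmas[i]); but with ancestral noise (eta > 0) the previous step actually integrated to old_sigma_down rather than to sigmas[i], so measuring from t_old = t_fn(old_sigma_down) places the history sample more accurately. A runnable sketch of the coefficient computation, with made-up sigma values:

import torch

t_fn = lambda sigma: sigma.log().neg()        # t(sigma) = -log(sigma)
phi1_fn = lambda t: torch.expm1(t) / t        # phi_1(t) = (e^t - 1) / t
phi2_fn = lambda t: (phi1_fn(t) - 1.0) / t    # phi_2(t) = (phi_1(t) - 1) / t

sigma_prev = torch.tensor(2.0)       # sigmas[i - 1]
sigma_cur = torch.tensor(1.0)        # sigmas[i]
old_sigma_down = torch.tensor(0.9)   # ancestral endpoint of the previous step (eta > 0)
sigma_down = torch.tensor(0.6)       # ancestral target of the current step

t, t_old = t_fn(sigma_cur), t_fn(old_sigma_down)
t_next, t_prev = t_fn(sigma_down), t_fn(sigma_prev)
h = t_next - t
c2 = (t_prev - t_old) / h            # post-fix: history offset measured from t_old

phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)  # weight for denoised
b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)             # weight for old_denoised
print(c2.item(), b1.item(), b2.item())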
@@ -1326,6 +1327,7 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
             old_denoised = uncond_denoised
         else:
             old_denoised = denoised
+        old_sigma_down = sigma_down
     return x
 
 @torch.no_grad()
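A quick self-check (mine, not from the commit) that the change is a no-op for non-ancestral sampling: with eta = 0, get_ancestral_step returns sigma_down == sigmas[i + 1], so the old_sigma_down recorded at step i - 1 equals sigmas[i] and the corrected c2 collapses to the old (t_prev - t) / h:

import torch

t_fn = lambda sigma: sigma.log().neg()

sigmas = torch.tensor([2.0, 1.0, 0.5])
i = 1
old_sigma_down = sigmas[i]   # eta == 0: previous step's sigma_down is just sigmas[i]
sigma_down = sigmas[i + 1]   # eta == 0: no noise is re-injected

t, t_old = t_fn(sigmas[i]), t_fn(old_sigma_down)
t_next, t_prev = t_fn(sigma_down), t_fn(sigmas[i - 1])
h = t_next - t

assert torch.allclose((t_prev - t) / h, (t_prev - t_old) / h)  # identical at eta == 0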
@@ -234,9 +234,7 @@ def download_and_process_images(image_urls):
 
 class IdeogramV1(ComfyNodeABC):
     """
-    Generates images synchronously using the Ideogram V1 model.
-
-    Images links are available for a limited period of time; if you would like to keep the image, you must download it.
+    Generates images using the Ideogram V1 model.
     """
 
     def __init__(self):
@@ -365,9 +363,7 @@ class IdeogramV1(ComfyNodeABC):
 
 class IdeogramV2(ComfyNodeABC):
     """
-    Generates images synchronously using the Ideogram V2 model.
-
-    Images links are available for a limited period of time; if you would like to keep the image, you must download it.
+    Generates images using the Ideogram V2 model.
     """
 
     def __init__(self):
@@ -536,10 +532,7 @@ class IdeogramV2(ComfyNodeABC):
 
 class IdeogramV3(ComfyNodeABC):
     """
-    Generates images synchronously using the Ideogram V3 model.
-
-    Supports both regular image generation from text prompts and image editing with mask.
-    Images links are available for a limited period of time; if you would like to keep the image, you must download it.
+    Generates images using the Ideogram V3 model. Supports both regular image generation from text prompts and image editing with mask.
     """
 
     def __init__(self):
@@ -184,6 +184,33 @@ def validate_image_result_response(response) -> None:
         raise KlingApiError(error_msg)
 
 
+def validate_input_image(image: torch.Tensor) -> None:
+    """
+    Validates the input image adheres to the expectations of the Kling API:
+    - The image resolution should not be less than 300*300px
+    - The aspect ratio of the image should be between 1:2.5 ~ 2.5:1
+
+    See: https://app.klingai.com/global/dev/document-api/apiReference/model/imageToVideo
+    """
+    if len(image.shape) == 4:
+        height, width = image.shape[1], image.shape[2]
+    elif len(image.shape) == 3:
+        height, width = image.shape[0], image.shape[1]
+    else:
+        raise ValueError("Invalid image tensor shape.")
+
+    # Ensure minimum resolution is met
+    if height < 300:
+        raise ValueError("Image height must be at least 300px")
+    if width < 300:
+        raise ValueError("Image width must be at least 300px")
+
+    # Ensure aspect ratio is within acceptable range
+    aspect_ratio = width / height
+    if aspect_ratio < 1 / 2.5 or aspect_ratio > 2.5:
+        raise ValueError("Image aspect ratio must be between 1:2.5 and 2.5:1")
+
+
 def get_camera_control_input_config(
     tooltip: str, default: float = 0.0
 ) -> tuple[IO, InputTypeOptions]:
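A hypothetical usage sketch (not part of the commit): ComfyUI IMAGE tensors are batched as [B, H, W, C], so the four-dimensional branch above reads height and width from dims 1 and 2. Assuming validate_input_image is imported from the Kling node module:

import torch

ok = torch.zeros(1, 512, 512, 3)      # [B, H, W, C] ComfyUI IMAGE tensor
validate_input_image(ok)              # passes: 512 >= 300 and ratio 1:1

too_small = torch.zeros(1, 100, 100, 3)
try:
    validate_input_image(too_small)
except ValueError as e:
    print(e)                          # Image height must be at least 300px

stretched = torch.zeros(1, 300, 900, 3)  # ratio 3:1 exceeds the 2.5:1 limit
try:
    validate_input_image(stretched)
except ValueError as e:
    print(e)                          # Image aspect ratio must be between 1:2.5 and 2.5:1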
@@ -530,7 +557,10 @@ class KlingImage2VideoNode(KlingNodeBase):
         return {
             "required": {
                 "start_frame": model_field_to_node_input(
-                    IO.IMAGE, KlingImage2VideoRequest, "image"
+                    IO.IMAGE,
+                    KlingImage2VideoRequest,
+                    "image",
+                    tooltip="The reference image used to generate the video.",
                 ),
                 "prompt": model_field_to_node_input(
                     IO.STRING, KlingImage2VideoRequest, "prompt", multiline=True
@@ -607,9 +637,10 @@ class KlingImage2VideoNode(KlingNodeBase):
         auth_token: Optional[str] = None,
     ) -> tuple[VideoFromFile]:
         validate_prompts(prompt, negative_prompt, MAX_PROMPT_LENGTH_I2V)
+        validate_input_image(start_frame)
 
         if camera_control is not None:
-            # Camera control type for image 2 video is always simple
+            # Camera control type for image 2 video is always `simple`
             camera_control.type = KlingCameraControlType.simple
 
         initial_operation = SynchronousOperation(