mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-05-14 02:57:24 +08:00
Compare commits
9 Commits
14fe5dbe0f
...
7c034737bb
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7c034737bb | ||
|
|
c011fb520c | ||
|
|
c945a433ae | ||
|
|
25757a53c9 | ||
|
|
1b25f1289e | ||
|
|
e35348aa53 | ||
|
|
cd8c7a2306 | ||
|
|
a95cbd2d7f | ||
|
|
554a67ac20 |
2
.github/workflows/stable-release.yml
vendored
2
.github/workflows/stable-release.yml
vendored
@ -145,6 +145,8 @@ jobs:
|
||||
cp -r ComfyUI/.ci/windows_${{ inputs.rel_name }}_base_files/* ./
|
||||
cp ../update_comfyui_and_python_dependencies.bat ./update/
|
||||
|
||||
echo 'local-portable' > ComfyUI/.comfy_environment
|
||||
|
||||
cd ..
|
||||
|
||||
"C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable
|
||||
|
||||
@ -418,8 +418,137 @@ class LTXV(LatentFormat):
|
||||
|
||||
class LTXAV(LTXV):
    """Latent format for the LTX audio-video (LTXAV) model.

    Inherits the LTXV latent configuration and overrides the RGB preview
    projection: one ``[r, g, b]`` factor row per latent channel (128 rows)
    plus a global RGB bias. The latent previewer uses these to project
    latents to an approximate RGB image during sampling.
    """

    def __init__(self):
        # NOTE(review): upstream first assigned None to both attributes and
        # immediately overwrote them; the dead assignments are dropped here.
        # super().__init__() is intentionally not called, matching the
        # original — confirm LTXV.__init__ has no other required effects.
        self.latent_rgb_factors = [
            [ 0.0350,  0.0159,  0.0132],
            [ 0.0025, -0.0021, -0.0003],
            [ 0.0286,  0.0028,  0.0020],
            [ 0.0280, -0.0114, -0.0202],
            [-0.0186,  0.0073,  0.0092],
            [ 0.0027,  0.0097, -0.0113],
            [-0.0069, -0.0032, -0.0024],
            [-0.0323, -0.0370, -0.0457],
            [ 0.0174,  0.0164,  0.0106],
            [-0.0097,  0.0061,  0.0035],
            [-0.0130, -0.0042, -0.0012],
            [-0.0102, -0.0002, -0.0091],
            [-0.0025,  0.0063,  0.0161],
            [ 0.0003,  0.0037,  0.0108],
            [ 0.0152,  0.0082,  0.0143],
            [ 0.0317,  0.0203,  0.0312],
            [-0.0092, -0.0233, -0.0119],
            [-0.0405, -0.0226, -0.0023],
            [ 0.0376,  0.0397,  0.0352],
            [ 0.0171, -0.0043, -0.0095],
            [ 0.0482,  0.0341,  0.0213],
            [ 0.0031, -0.0046, -0.0018],
            [-0.0486, -0.0383, -0.0294],
            [-0.0071, -0.0272, -0.0123],
            [ 0.0320,  0.0218,  0.0289],
            [ 0.0327,  0.0088, -0.0116],
            [-0.0098, -0.0240, -0.0111],
            [ 0.0094, -0.0116,  0.0021],
            [ 0.0309,  0.0092,  0.0165],
            [-0.0065, -0.0077, -0.0107],
            [ 0.0179,  0.0114,  0.0038],
            [-0.0018, -0.0030, -0.0026],
            [-0.0002,  0.0076, -0.0029],
            [-0.0131, -0.0059, -0.0170],
            [ 0.0055,  0.0066, -0.0038],
            [ 0.0154,  0.0063,  0.0090],
            [ 0.0186,  0.0175,  0.0188],
            [-0.0166, -0.0381, -0.0428],
            [ 0.0121,  0.0015, -0.0153],
            [ 0.0118,  0.0050,  0.0019],
            [ 0.0125,  0.0259,  0.0231],
            [ 0.0046,  0.0130,  0.0081],
            [ 0.0271,  0.0250,  0.0250],
            [-0.0054, -0.0347, -0.0326],
            [-0.0438, -0.0262, -0.0228],
            [-0.0191, -0.0256, -0.0173],
            [-0.0205, -0.0058,  0.0042],
            [ 0.0404,  0.0434,  0.0346],
            [-0.0242, -0.0177, -0.0146],
            [ 0.0161,  0.0223,  0.0168],
            [-0.0240, -0.0320, -0.0299],
            [-0.0019,  0.0043,  0.0008],
            [-0.0060, -0.0133, -0.0244],
            [-0.0048, -0.0225, -0.0167],
            [ 0.0267,  0.0133,  0.0152],
            [ 0.0222,  0.0167,  0.0028],
            [ 0.0015, -0.0062,  0.0013],
            [-0.0241, -0.0178, -0.0079],
            [ 0.0040, -0.0081, -0.0097],
            [-0.0064,  0.0133, -0.0011],
            [-0.0204, -0.0231, -0.0304],
            [ 0.0011, -0.0011,  0.0145],
            [-0.0283, -0.0259, -0.0260],
            [ 0.0038,  0.0171, -0.0029],
            [ 0.0637,  0.0424,  0.0409],
            [ 0.0092,  0.0163,  0.0188],
            [ 0.0082,  0.0055, -0.0179],
            [-0.0177, -0.0286, -0.0147],
            [ 0.0171,  0.0242,  0.0398],
            [-0.0129,  0.0095, -0.0071],
            [-0.0154,  0.0036,  0.0128],
            [-0.0081, -0.0009,  0.0118],
            [-0.0067, -0.0178, -0.0230],
            [-0.0022, -0.0125, -0.0003],
            [-0.0032, -0.0039, -0.0022],
            [-0.0005, -0.0127, -0.0131],
            [-0.0143, -0.0157, -0.0165],
            [-0.0262, -0.0263, -0.0270],
            [ 0.0063,  0.0127,  0.0178],
            [ 0.0092,  0.0133,  0.0150],
            [-0.0106, -0.0068,  0.0032],
            [-0.0214, -0.0022,  0.0171],
            [-0.0104, -0.0266, -0.0362],
            [ 0.0021,  0.0048, -0.0005],
            [ 0.0345,  0.0431,  0.0402],
            [-0.0275, -0.0110, -0.0195],
            [ 0.0203,  0.0251,  0.0224],
            [ 0.0016, -0.0037, -0.0094],
            [ 0.0241,  0.0198,  0.0114],
            [-0.0003,  0.0027,  0.0141],
            [ 0.0012, -0.0052, -0.0084],
            [ 0.0057, -0.0028, -0.0163],
            [-0.0488, -0.0545, -0.0509],
            [-0.0076, -0.0025, -0.0014],
            [-0.0249, -0.0142, -0.0367],
            [ 0.0136,  0.0041,  0.0135],
            [ 0.0007,  0.0034, -0.0053],
            [-0.0068, -0.0109,  0.0029],
            [ 0.0006, -0.0237, -0.0094],
            [-0.0149, -0.0177, -0.0131],
            [-0.0105,  0.0039,  0.0216],
            [ 0.0242,  0.0200,  0.0180],
            [-0.0339, -0.0153, -0.0195],
            [ 0.0104,  0.0151,  0.0120],
            [-0.0043,  0.0089,  0.0047],
            [ 0.0157, -0.0030,  0.0008],
            [ 0.0126,  0.0102, -0.0040],
            [ 0.0040,  0.0114,  0.0137],
            [ 0.0423,  0.0473,  0.0436],
            [-0.0128, -0.0066, -0.0152],
            [-0.0337, -0.0087, -0.0026],
            [-0.0052,  0.0235,  0.0291],
            [ 0.0079,  0.0154,  0.0260],
            [-0.0539, -0.0377, -0.0358],
            [-0.0188,  0.0062, -0.0035],
            [-0.0186,  0.0041, -0.0083],
            [ 0.0045, -0.0049,  0.0053],
            [ 0.0172,  0.0071,  0.0042],
            [-0.0003, -0.0078, -0.0096],
            [-0.0209, -0.0132, -0.0135],
            [-0.0074,  0.0017,  0.0099],
            [-0.0038,  0.0070,  0.0014],
            [-0.0013, -0.0017,  0.0073],
            [ 0.0030,  0.0105,  0.0105],
            [ 0.0154, -0.0168, -0.0235],
            [-0.0108, -0.0038,  0.0047],
            [-0.0298, -0.0347, -0.0436],
            [-0.0206, -0.0189, -0.0139]
        ]
        self.latent_rgb_factors_bias = [0.2796, 0.1101, -0.0047]
|
||||
|
||||
class HunyuanVideo(LatentFormat):
|
||||
latent_channels = 16
|
||||
|
||||
@ -26,6 +26,7 @@ import uuid
|
||||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
import tqdm
|
||||
|
||||
import comfy.float
|
||||
import comfy.hooks
|
||||
@ -1651,7 +1652,11 @@ class ModelPatcherDynamic(ModelPatcher):
|
||||
self.model.model_loaded_weight_memory += casted_buf.numel() * casted_buf.element_size()
|
||||
|
||||
force_load_stat = f" Force pre-loaded {len(self.backup)} weights: {self.model.model_loaded_weight_memory // 1024} KB." if len(self.backup) > 0 else ""
|
||||
logging.info(f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}")
|
||||
log_key = (self.patches_uuid, allocated_size, num_patches, len(self.backup), self.model.model_loaded_weight_memory)
|
||||
in_loop = bool(getattr(tqdm.tqdm, "_instances", None))
|
||||
level = logging.DEBUG if in_loop and getattr(self, "_last_prepare_log_key", None) == log_key else logging.INFO
|
||||
self._last_prepare_log_key = log_key
|
||||
logging.log(level, f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}")
|
||||
|
||||
self.model.device = device_to
|
||||
self.model.current_weight_patches_uuid = self.patches_uuid
|
||||
|
||||
@ -83,13 +83,16 @@ class GeminiImageModel(str, Enum):
|
||||
|
||||
async def create_image_parts(
|
||||
cls: type[IO.ComfyNode],
|
||||
images: Input.Image,
|
||||
images: Input.Image | list[Input.Image],
|
||||
image_limit: int = 0,
|
||||
) -> list[GeminiPart]:
|
||||
image_parts: list[GeminiPart] = []
|
||||
if image_limit < 0:
|
||||
raise ValueError("image_limit must be greater than or equal to 0 when creating Gemini image parts.")
|
||||
total_images = get_number_of_images(images)
|
||||
|
||||
# Accept either a single (possibly-batched) tensor or a list of them; share URL budget across all.
|
||||
images_list: list[Input.Image] = images if isinstance(images, list) else [images]
|
||||
total_images = sum(get_number_of_images(img) for img in images_list)
|
||||
if total_images <= 0:
|
||||
raise ValueError("No images provided to create_image_parts; at least one image is required.")
|
||||
|
||||
@ -98,10 +101,18 @@ async def create_image_parts(
|
||||
|
||||
# Number of images we'll send as URLs (fileData)
|
||||
num_url_images = min(effective_max, 10) # Vertex API max number of image links
|
||||
upload_kwargs: dict = {"wait_label": "Uploading reference images"}
|
||||
if effective_max > num_url_images:
|
||||
# Split path (e.g. 11+ images): suppress per-image counter to avoid a confusing dual-fraction label.
|
||||
upload_kwargs = {
|
||||
"wait_label": f"Uploading reference images ({num_url_images}+)",
|
||||
"show_batch_index": False,
|
||||
}
|
||||
reference_images_urls = await upload_images_to_comfyapi(
|
||||
cls,
|
||||
images,
|
||||
images_list,
|
||||
max_images=num_url_images,
|
||||
**upload_kwargs,
|
||||
)
|
||||
for reference_image_url in reference_images_urls:
|
||||
image_parts.append(
|
||||
@ -112,15 +123,22 @@ async def create_image_parts(
|
||||
)
|
||||
)
|
||||
)
|
||||
for idx in range(num_url_images, effective_max):
|
||||
image_parts.append(
|
||||
GeminiPart(
|
||||
inlineData=GeminiInlineData(
|
||||
mimeType=GeminiMimeType.image_png,
|
||||
data=tensor_to_base64_string(images[idx]),
|
||||
if effective_max > num_url_images:
|
||||
flat: list[torch.Tensor] = []
|
||||
for tensor in images_list:
|
||||
if len(tensor.shape) == 4:
|
||||
flat.extend(tensor[i] for i in range(tensor.shape[0]))
|
||||
else:
|
||||
flat.append(tensor)
|
||||
for idx in range(num_url_images, effective_max):
|
||||
image_parts.append(
|
||||
GeminiPart(
|
||||
inlineData=GeminiInlineData(
|
||||
mimeType=GeminiMimeType.image_png,
|
||||
data=tensor_to_base64_string(flat[idx]),
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
return image_parts
|
||||
|
||||
|
||||
@ -891,10 +909,6 @@ class GeminiNanoBanana2(IO.ComfyNode):
|
||||
"9:16",
|
||||
"16:9",
|
||||
"21:9",
|
||||
# "1:4",
|
||||
# "4:1",
|
||||
# "8:1",
|
||||
# "1:8",
|
||||
],
|
||||
default="auto",
|
||||
tooltip="If set to 'auto', matches your input image's aspect ratio; "
|
||||
@ -902,12 +916,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"resolution",
|
||||
options=[
|
||||
# "512px",
|
||||
"1K",
|
||||
"2K",
|
||||
"4K",
|
||||
],
|
||||
options=["1K", "2K", "4K"],
|
||||
tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
@ -956,6 +965,7 @@ class GeminiNanoBanana2(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=GEMINI_IMAGE_2_PRICE_BADGE,
|
||||
is_deprecated=True,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@ -1016,6 +1026,197 @@ class GeminiNanoBanana2(IO.ComfyNode):
|
||||
)
|
||||
|
||||
|
||||
def _nano_banana_2_v2_model_inputs():
    """Build the per-model widget inputs for the Nano Banana 2 (V2) node.

    Returns the list of IO inputs nested under the "model" DynamicCombo
    option: aspect ratio, target resolution, thinking level, an auto-growing
    set of up to 14 reference images, and optional Gemini context files.
    """
    return [
        IO.Combo.Input(
            "aspect_ratio",
            options=[
                "auto",
                "1:1",
                "2:3",
                "3:2",
                "3:4",
                "4:3",
                "4:5",
                "5:4",
                "9:16",
                "16:9",
                "21:9",
                "1:4",
                "4:1",
                "8:1",
                "1:8",
            ],
            default="auto",
            # Fixed tooltip: original said "a 16:9 square", which is
            # self-contradictory (16:9 is not square).
            tooltip="If set to 'auto', matches your input image's aspect ratio; "
            "if no image is provided, a 16:9 image is usually generated.",
        ),
        IO.Combo.Input(
            "resolution",
            options=["1K", "2K", "4K"],
            tooltip="Target output resolution. For 2K/4K the native Gemini upscaler is used.",
        ),
        IO.Combo.Input(
            "thinking_level",
            options=["MINIMAL", "HIGH"],
        ),
        IO.Autogrow.Input(
            "images",
            template=IO.Autogrow.TemplateNames(
                IO.Image.Input("image"),
                names=[f"image_{i}" for i in range(1, 15)],
                min=0,
            ),
            tooltip="Optional reference image(s). Up to 14 images total.",
        ),
        IO.Custom("GEMINI_INPUT_FILES").Input(
            "files",
            optional=True,
            tooltip="Optional file(s) to use as context for the model. "
            "Accepts inputs from the Gemini Generate Content Input Files node.",
        ),
    ]
|
||||
|
||||
|
||||
class GeminiNanoBanana2V2(IO.ComfyNode):
    """Nano Banana 2 image generation/editing node backed by Google Vertex AI.

    Successor to GeminiNanoBanana2: the model-specific widgets (aspect
    ratio, resolution, thinking level, reference images, context files)
    live under a DynamicCombo "model" input built by
    _nano_banana_2_v2_model_inputs().
    """

    @classmethod
    def define_schema(cls):
        """Declare the node's id, inputs, outputs, and pricing badge."""
        prompt_input = IO.String.Input(
            "prompt",
            multiline=True,
            tooltip="Text prompt describing the image to generate or the edits to apply. "
            "Include any constraints, styles, or details the model should follow.",
            default="",
        )
        model_input = IO.DynamicCombo.Input(
            "model",
            options=[
                IO.DynamicCombo.Option(
                    "Nano Banana 2 (Gemini 3.1 Flash Image)",
                    _nano_banana_2_v2_model_inputs(),
                ),
            ],
        )
        seed_input = IO.Int.Input(
            "seed",
            default=42,
            min=0,
            max=0xFFFFFFFFFFFFFFFF,
            control_after_generate=True,
            tooltip="When the seed is fixed to a specific value, the model makes a best effort to provide "
            "the same response for repeated requests. Deterministic output isn't guaranteed. "
            "Also, changing the model or parameter settings, such as the temperature, "
            "can cause variations in the response even when you use the same seed value. "
            "By default, a random seed value is used.",
        )
        modalities_input = IO.Combo.Input(
            "response_modalities",
            options=["IMAGE", "IMAGE+TEXT"],
            advanced=True,
        )
        system_prompt_input = IO.String.Input(
            "system_prompt",
            multiline=True,
            default=GEMINI_IMAGE_SYS_PROMPT,
            optional=True,
            tooltip="Foundational instructions that dictate an AI's behavior.",
            advanced=True,
        )
        thought_output = IO.Image.Output(
            display_name="thought_image",
            tooltip="First image from the model's thinking process. "
            "Only available with thinking_level HIGH and IMAGE+TEXT modality.",
        )
        # Price depends on the selected model and its nested resolution widget.
        badge = IO.PriceBadge(
            depends_on=IO.PriceBadgeDepends(widgets=["model", "model.resolution"]),
            expr="""
            (
                $r := $lookup(widgets, "model.resolution");
                $prices := {"1k": 0.0696, "2k": 0.1014, "4k": 0.154};
                {"type":"usd","usd": $lookup($prices, $r), "format":{"suffix":"/Image","approximate":true}}
            )
            """,
        )
        return IO.Schema(
            node_id="GeminiNanoBanana2V2",
            display_name="Nano Banana 2",
            category="api node/image/Gemini",
            description="Generate or edit images synchronously via Google Vertex API.",
            inputs=[
                prompt_input,
                model_input,
                seed_input,
                modalities_input,
                system_prompt_input,
            ],
            outputs=[
                IO.Image.Output(),
                IO.String.Output(),
                thought_output,
            ],
            hidden=[
                IO.Hidden.auth_token_comfy_org,
                IO.Hidden.api_key_comfy_org,
                IO.Hidden.unique_id,
            ],
            is_api_node=True,
            price_badge=badge,
        )

    @classmethod
    async def execute(
        cls,
        prompt: str,
        model: dict,
        seed: int,
        response_modalities: str,
        system_prompt: str = "",
    ) -> IO.NodeOutput:
        """Send one generateContent request; return (image, text, thought image).

        NOTE(review): `seed` is accepted for UI parity but is not forwarded
        in the request payload — confirm this is intended upstream.
        """
        validate_string(prompt, strip_whitespace=True, min_length=1)

        # Map the combo's display label to the backend model identifier.
        selected = model["model"]
        model_id = (
            "gemini-3.1-flash-image-preview"
            if selected == "Nano Banana 2 (Gemini 3.1 Flash Image)"
            else selected
        )

        content_parts: list[GeminiPart] = [GeminiPart(text=prompt)]
        image_inputs = model.get("images") or {}
        if image_inputs:
            tensors: list[Input.Image] = [t for t in image_inputs.values() if t is not None]
            if tensors:
                # Hard cap across all (possibly batched) reference tensors.
                if sum(get_number_of_images(t) for t in tensors) > 14:
                    raise ValueError("The current maximum number of supported images is 14.")
                content_parts.extend(await create_image_parts(cls, tensors))
        extra_files = model.get("files")
        if extra_files is not None:
            content_parts.extend(extra_files)

        image_config = GeminiImageConfig(imageSize=model["resolution"])
        if model["aspect_ratio"] != "auto":
            image_config.aspectRatio = model["aspect_ratio"]

        system_instruction = None
        if system_prompt:
            system_instruction = GeminiSystemInstructionContent(parts=[GeminiTextPart(text=system_prompt)], role=None)

        # "IMAGE" suppresses text output; "IMAGE+TEXT" requests both.
        want_text = response_modalities != "IMAGE"
        response = await sync_op(
            cls,
            ApiEndpoint(path=f"/proxy/vertexai/gemini/{model_id}", method="POST"),
            data=GeminiImageGenerateContentRequest(
                contents=[
                    GeminiContent(role=GeminiRole.user, parts=content_parts),
                ],
                generationConfig=GeminiImageGenerationConfig(
                    responseModalities=(["TEXT", "IMAGE"] if want_text else ["IMAGE"]),
                    imageConfig=image_config,
                    thinkingConfig=GeminiThinkingConfig(thinkingLevel=model["thinking_level"]),
                ),
                systemInstruction=system_instruction,
            ),
            response_model=GeminiGenerateContentResponse,
            price_extractor=calculate_tokens_price,
        )
        return IO.NodeOutput(
            await get_image_from_response(response),
            get_text_from_response(response),
            await get_image_from_response(response, thought=True),
        )
|
||||
|
||||
|
||||
class GeminiExtension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
@ -1024,6 +1225,7 @@ class GeminiExtension(ComfyExtension):
|
||||
GeminiImage,
|
||||
GeminiImage2,
|
||||
GeminiNanoBanana2,
|
||||
GeminiNanoBanana2V2,
|
||||
GeminiInputFiles,
|
||||
]
|
||||
|
||||
|
||||
@ -54,7 +54,12 @@ class GrokImageNode(IO.ComfyNode):
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
options=["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"],
|
||||
options=[
|
||||
"grok-imagine-image-quality",
|
||||
"grok-imagine-image-pro",
|
||||
"grok-imagine-image",
|
||||
"grok-imagine-image-beta",
|
||||
],
|
||||
),
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
@ -111,10 +116,12 @@ class GrokImageNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images", "resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$rate := $contains(widgets.model, "pro") ? 0.07 : 0.02;
|
||||
$rate := widgets.model = "grok-imagine-image-quality"
|
||||
? (widgets.resolution = "1k" ? 0.05 : 0.07)
|
||||
: ($contains(widgets.model, "pro") ? 0.07 : 0.02);
|
||||
{"type":"usd","usd": $rate * widgets.number_of_images}
|
||||
)
|
||||
""",
|
||||
@ -167,7 +174,12 @@ class GrokImageEditNode(IO.ComfyNode):
|
||||
inputs=[
|
||||
IO.Combo.Input(
|
||||
"model",
|
||||
options=["grok-imagine-image-pro", "grok-imagine-image", "grok-imagine-image-beta"],
|
||||
options=[
|
||||
"grok-imagine-image-quality",
|
||||
"grok-imagine-image-pro",
|
||||
"grok-imagine-image",
|
||||
"grok-imagine-image-beta",
|
||||
],
|
||||
),
|
||||
IO.Image.Input("image", display_name="images"),
|
||||
IO.String.Input(
|
||||
@ -228,11 +240,19 @@ class GrokImageEditNode(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["model", "number_of_images", "resolution"]),
|
||||
expr="""
|
||||
(
|
||||
$rate := $contains(widgets.model, "pro") ? 0.07 : 0.02;
|
||||
{"type":"usd","usd": 0.002 + $rate * widgets.number_of_images}
|
||||
$isQualityModel := widgets.model = "grok-imagine-image-quality";
|
||||
$isPro := $contains(widgets.model, "pro");
|
||||
$rate := $isQualityModel
|
||||
? (widgets.resolution = "1k" ? 0.05 : 0.07)
|
||||
: ($isPro ? 0.07 : 0.02);
|
||||
$base := $isQualityModel ? 0.01 : 0.002;
|
||||
$output := $rate * widgets.number_of_images;
|
||||
$isPro
|
||||
? {"type":"usd","usd": $base + $output}
|
||||
: {"type":"range_usd","min_usd": $base + $output, "max_usd": 3 * $base + $output}
|
||||
)
|
||||
""",
|
||||
),
|
||||
|
||||
@ -2787,11 +2787,15 @@ class MotionControl(IO.ComfyNode):
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["mode", "model"]),
|
||||
expr="""
|
||||
(
|
||||
$prices := {"std": 0.07, "pro": 0.112};
|
||||
{"type":"usd","usd": $lookup($prices, widgets.mode), "format":{"suffix":"/second"}}
|
||||
$prices := {
|
||||
"kling-v3": {"std": 0.126, "pro": 0.168},
|
||||
"kling-v2-6": {"std": 0.07, "pro": 0.112}
|
||||
};
|
||||
$modelPrices := $lookup($prices, widgets.model);
|
||||
{"type":"usd","usd": $lookup($modelPrices, widgets.mode), "format":{"suffix":"/second"}}
|
||||
)
|
||||
""",
|
||||
),
|
||||
|
||||
@ -763,7 +763,7 @@ class SamplerCustom(io.ComfyNode):
|
||||
noise_mask = latent["noise_mask"]
|
||||
|
||||
x0_output = {}
|
||||
callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)
|
||||
callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output, shape=latent_image.shape if latent_image.is_nested else None)
|
||||
|
||||
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
|
||||
samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed)
|
||||
@ -957,7 +957,7 @@ class SamplerCustomAdvanced(io.ComfyNode):
|
||||
noise_mask = latent["noise_mask"]
|
||||
|
||||
x0_output = {}
|
||||
callback = latent_preview.prepare_callback(guider.model_patcher, sigmas.shape[-1] - 1, x0_output)
|
||||
callback = latent_preview.prepare_callback(guider.model_patcher, sigmas.shape[-1] - 1, x0_output, shape=latent_image.shape if latent_image.is_nested else None)
|
||||
|
||||
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
|
||||
samples = guider.sample(noise.generate_noise(latent), latent_image, sampler, sigmas, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise.seed)
|
||||
|
||||
@ -7,6 +7,7 @@ import comfy.model_management
|
||||
import folder_paths
|
||||
import comfy.utils
|
||||
import logging
|
||||
import math
|
||||
|
||||
default_preview_method = args.preview_method
|
||||
|
||||
@ -109,7 +110,7 @@ def get_previewer(device, latent_format):
|
||||
previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias, latent_format.latent_rgb_factors_reshape)
|
||||
return previewer
|
||||
|
||||
def prepare_callback(model, steps, x0_output_dict=None):
|
||||
def prepare_callback(model, steps, x0_output_dict=None, shape=None):
|
||||
preview_format = "JPEG"
|
||||
if preview_format not in ["JPEG", "PNG"]:
|
||||
preview_format = "JPEG"
|
||||
@ -121,6 +122,10 @@ def prepare_callback(model, steps, x0_output_dict=None):
|
||||
if x0_output_dict is not None:
|
||||
x0_output_dict["x0"] = x0
|
||||
|
||||
if shape is not None:
|
||||
cut = math.prod(shape[1:])
|
||||
x0 = x0[:, :, :cut].reshape([x0.shape[0]] + list(shape)[1:])
|
||||
|
||||
preview_bytes = None
|
||||
if previewer:
|
||||
preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
|
||||
|
||||
2
nodes.py
2
nodes.py
@ -1532,7 +1532,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
|
||||
if "noise_mask" in latent:
|
||||
noise_mask = latent["noise_mask"]
|
||||
|
||||
callback = latent_preview.prepare_callback(model, steps)
|
||||
callback = latent_preview.prepare_callback(model, steps, shape=latent_image.shape if latent_image.is_nested else None)
|
||||
disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
|
||||
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
|
||||
denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
comfyui-frontend-package==1.43.17
|
||||
comfyui-workflow-templates==0.9.69
|
||||
comfyui-workflow-templates==0.9.72
|
||||
comfyui-embedded-docs==0.4.4
|
||||
torch
|
||||
torchsde
|
||||
|
||||
Loading…
Reference in New Issue
Block a user