Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-08 04:22:36 +08:00)

Compare commits: 29 commits, 4271cf7431...5f7c67e25f
| SHA1 |
|---|
| 5f7c67e25f |
| ac91c340f4 |
| 2db3b0ff90 |
| 6516ab335d |
| ad53e78f11 |
| 29011ba87e |
| cd4985e2f3 |
| bfe31d0b9d |
| fa71050a07 |
| 7795a4e86c |
| c4c388ffc8 |
| c804c0c12e |
| 6c9110564b |
| b4dcbdfac7 |
| 2c859e9558 |
| 174d91c9ed |
| e1cf4f7420 |
| 357f89a4bf |
| 477f330415 |
| 36e19df686 |
| aba97d6ada |
| 7419345b76 |
| 41b4c3ea73 |
| 5b27c661c6 |
| 6572cbb61d |
| 4f12985e45 |
| 847e3cc3a2 |
| e7ebda4b61 |
| eeee0f5b1b |
31 .dockerignore Normal file

@@ -0,0 +1,31 @@
# This file should remain in sync with .gitignore. If you need to make changes,
# please add a comment explaining why. For items that must be removed, comment
# them out instead of deleting them.
__pycache__/
*.py[cod]
/output/
/input/
# This file prevents the image from building and would be overwritten by the
# /data volume in any case.
#!/input/example.png
/models/
/temp/
/custom_nodes/
!custom_nodes/example_node.py.example
extra_model_paths.yaml
/.vs
.vscode/
.idea/
venv/
.venv/
/web/extensions/*
!/web/extensions/logging.js.example
!/web/extensions/core/
/tests-ui/data/object_info.json
/user/
*.log
web_custom_versions/
.DS_Store
openapi.yaml
filtered-openapi.yaml
uv.lock
3 .gitattributes vendored

@@ -1,3 +1,6 @@
 /web/assets/** linguist-generated
 /web/** linguist-vendored
 comfy_api_nodes/apis/__init__.py linguist-generated
+# Force LF eol for Docker entrypoint (fix "exec: no such file or directory"
+# error with CRLF checkouts)
+entrypoint.sh text eol=lf
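Note that `.gitattributes` rules only take effect as files are checked in or out; an existing CRLF checkout needs to be renormalized to pick up the new `eol=lf` rule. A hedged sketch (the exact workflow may differ):

```shell
# Re-scan the working tree and stage files whose line-ending normalization
# changed under the new .gitattributes rules.
git add --renormalize .
```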
6 .github/workflows/release-stable-all.yml vendored

@@ -20,7 +20,7 @@ jobs:
       git_tag: ${{ inputs.git_tag }}
       cache_tag: "cu130"
       python_minor: "13"
-      python_patch: "9"
+      python_patch: "11"
       rel_name: "nvidia"
       rel_extra_name: ""
       test_release: true
@@ -65,11 +65,11 @@ jobs:
       contents: "write"
       packages: "write"
       pull-requests: "read"
-    name: "Release AMD ROCm 7.1.1"
+    name: "Release AMD ROCm 7.2"
    uses: ./.github/workflows/stable-release.yml
    with:
      git_tag: ${{ inputs.git_tag }}
-      cache_tag: "rocm711"
+      cache_tag: "rocm72"
      python_minor: "12"
      python_patch: "10"
      rel_name: "amd"
1 .gitignore vendored

@@ -1,3 +1,4 @@
+# If you modify this file, remember to update .dockerignore as well.
 __pycache__/
 *.py[cod]
 /output/
85 Dockerfile Normal file

@@ -0,0 +1,85 @@
# Docker buildfile for the ComfyUI image, with support for hardware
# acceleration, file ownership synchronization, custom nodes, and custom node
# managers.

# While Python 3.13 is well supported by ComfyUI, some older custom node packs
# may not work correctly with this version, which is why we're staying on Python
# 3.12 for now.
#
# Users are free to try different base Python image tags (e.g., 3.13, alpine,
# *-slim), but for maintainability, only one base version is officially
# supported at a time.
FROM python:3.12.12-trixie

# Install cmake, which is an indirect installation dependency.
RUN apt-get update && apt-get install -y --no-install-recommends cmake

# Create a regular user whose UID and GID will match the host user's at runtime.
# Also create a home directory for this user (-m), as some common Python tools
# (such as uv) interact with the user's home directory.
RUN useradd -m comfyui

# Install ComfyUI under /comfyui and set folder ownership to the comfyui user.
# With the legacy Docker builder (DOCKER_BUILDKIT=0), WORKDIR always creates missing
# directories as root (even if a different USER is active). To ensure the comfyui user
# can write inside, ownership must be fixed manually.
WORKDIR /comfyui
RUN chown comfyui:comfyui .

# Install ComfyUI as the comfyui user.
USER comfyui

# Set up a Python virtual environment and configure it as the default Python.
#
# Reasons for using a virtual environment:
# - Some custom nodes use third-party tools like uv, which do not support
#   user-level installations.
# - Custom node managers may install or update dependencies as the regular user,
#   so a global installation is not an option.
# This leaves virtual environments as the only viable choice.
RUN python -m venv .venv
ENV PATH="/comfyui/.venv/bin:$PATH"

# Install ComfyUI's Python dependencies. Although dependency installation is also
# performed at startup, building ComfyUI's base dependencies into the image
# significantly speeds up each container's first run.
#
# Since this step takes a long time to complete, it's performed early to take
# advantage of Docker's build cache, thereby accelerating subsequent builds.
COPY requirements.txt manager_requirements.txt ./
RUN pip install --no-cache-dir --disable-pip-version-check \
    -r requirements.txt

# Install ComfyUI
COPY . .

# Purely declarative: inform Docker and image users that this image is designed
# to listen on port 8188 for the web GUI.
EXPOSE 8188

# Declare persistent volumes. We assign one volume per data directory to match
# ComfyUI's natural file layout and to let users choose which directories they
# want to mount.
VOLUME /comfyui/.venv
VOLUME /comfyui/custom_nodes
VOLUME /comfyui/input
VOLUME /comfyui/models
VOLUME /comfyui/output
VOLUME /comfyui/temp
VOLUME /comfyui/user
VOLUME /home/comfyui

# Switch back to root to run the entrypoint and to install additional system
# dependencies.
USER root

# Configure entrypoint
RUN chmod +x entrypoint.sh
ENTRYPOINT [ "./entrypoint.sh" ]
CMD [ "python", "./main.py" ]

# Install additional system dependencies
ARG APT_EXTRA_PACKAGES
RUN apt-get install -y --no-install-recommends $APT_EXTRA_PACKAGES \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
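If a custom node pack needs extra system libraries, they can be baked into the image through the `APT_EXTRA_PACKAGES` build argument declared above. A hedged sketch (the package names are illustrative placeholders only):

```shell
# Build the image with extra Debian packages layered in at the end.
docker build --build-arg APT_EXTRA_PACKAGES="ffmpeg libgl1" -t comfyui:local .
```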
28 README.md

@@ -46,6 +46,12 @@ ComfyUI lets you design and execute advanced stable diffusion pipelines using a
 - Get the latest commits and completely portable.
 - Available on Windows.

+#### [Docker Install](#running-with-docker)
+- Run ComfyUI inside an isolated Docker container
+- Most secure way to run ComfyUI and custom node packs
+- Requires Docker and Docker Compose
+- Supports NVIDIA GPUs (Not tested on other hardware.)

 #### [Manual Install](#manual-install-windows-linux)
 Supports all operating systems and GPU types (NVIDIA, AMD, Intel, Apple Silicon, Ascend).

@@ -350,6 +356,28 @@ For models compatible with Iluvatar Extension for PyTorch. Here's a step-by-step
 | `--enable-manager-legacy-ui` | Use the legacy manager UI instead of the new UI (requires `--enable-manager`) |
 | `--disable-manager-ui` | Disable the manager UI and endpoints while keeping background features like security checks and scheduled installation completion (requires `--enable-manager`) |

+## Running with Docker
+
+Start by installing Docker, Docker Compose, and the NVIDIA Container Toolkit on
+your host. Next, edit `compose.yaml` and update the `UID` and `GID` variables to
+match your user. Additional fields are documented in the file for further
+customization.
+
+Once ready, build and run the image locally:
+
+```shell
+# (Re)build the Docker image. Run this before the first start, after updating
+# ComfyUI, or after changing any build arguments in `compose.yaml`.
+docker compose build
+# Start ComfyUI. This reuses the most recently built image.
+docker compose up
+```
+
+To stop and remove the container along with its volumes, run:
+
+```shell
+docker compose down -v
+```

 # Running
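To find the values for the `UID` and `GID` fields in `compose.yaml`, the standard `id` utility works; a small sketch (any equivalent way of reading your numeric user and group ID is fine):

```shell
# Print the numeric user and group ID of the current host user.
id -u
id -g
```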
@@ -479,10 +479,12 @@ class WanVAE(nn.Module):

     def encode(self, x):
         conv_idx = [0]
-        feat_map = [None] * count_conv3d(self.decoder)
         ## cache
         t = x.shape[2]
         iter_ = 1 + (t - 1) // 4
+        feat_map = None
+        if iter_ > 1:
+            feat_map = [None] * count_conv3d(self.decoder)
         ## Split the input x to encode along the time axis into chunks of 1, 4, 4, 4, ...
         for i in range(iter_):
             conv_idx = [0]
@@ -502,10 +504,11 @@ class WanVAE(nn.Module):

     def decode(self, z):
         conv_idx = [0]
-        feat_map = [None] * count_conv3d(self.decoder)
         # z: [b,c,t,h,w]

         iter_ = z.shape[2]
+        feat_map = None
+        if iter_ > 1:
+            feat_map = [None] * count_conv3d(self.decoder)
         x = self.conv2(z)
         for i in range(iter_):
             conv_idx = [0]
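For context on why the cache can stay unallocated: the encoder consumes the time axis in chunks of 1, 4, 4, 4, ..., so a clip with t frames needs `1 + (t - 1) // 4` iterations, and the `feat_map` cache is only needed when there is more than one chunk. A quick sketch of that arithmetic:

```python
# Number of temporal chunks for a few clip lengths (1, 4, 4, 4, ... split).
for t in (1, 5, 9, 17):
    iter_ = 1 + (t - 1) // 4
    print(t, "frames ->", iter_, "iteration(s)")  # 1->1, 5->2, 9->3, 17->5
```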
@@ -13,17 +13,6 @@ class Text2ImageTaskCreationRequest(BaseModel):
     watermark: bool | None = Field(False)


-class Image2ImageTaskCreationRequest(BaseModel):
-    model: str = Field(...)
-    prompt: str = Field(...)
-    response_format: str | None = Field("url")
-    image: str = Field(..., description="Base64 encoded string or image URL")
-    size: str | None = Field("adaptive")
-    seed: int | None = Field(..., ge=0, le=2147483647)
-    guidance_scale: float | None = Field(..., ge=1.0, le=10.0)
-    watermark: bool | None = Field(False)
-
-
 class Seedream4Options(BaseModel):
     max_images: int = Field(15)
122 comfy_api_nodes/apis/magnific.py Normal file

@@ -0,0 +1,122 @@
from typing import TypedDict

from pydantic import AliasChoices, BaseModel, Field, model_validator


class InputPortraitMode(TypedDict):
    portrait_mode: str
    portrait_style: str
    portrait_beautifier: str


class InputAdvancedSettings(TypedDict):
    advanced_settings: str
    whites: int
    blacks: int
    brightness: int
    contrast: int
    saturation: int
    engine: str
    transfer_light_a: str
    transfer_light_b: str
    fixed_generation: bool


class InputSkinEnhancerMode(TypedDict):
    mode: str
    skin_detail: int
    optimized_for: str


class ImageUpscalerCreativeRequest(BaseModel):
    image: str = Field(...)
    scale_factor: str = Field(...)
    optimized_for: str = Field(...)
    prompt: str | None = Field(None)
    creativity: int = Field(...)
    hdr: int = Field(...)
    resemblance: int = Field(...)
    fractality: int = Field(...)
    engine: str = Field(...)


class ImageUpscalerPrecisionV2Request(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    ultra_detail: int = Field(...)
    flavor: str = Field(...)
    scale_factor: int = Field(...)


class ImageRelightAdvancedSettingsRequest(BaseModel):
    whites: int = Field(...)
    blacks: int = Field(...)
    brightness: int = Field(...)
    contrast: int = Field(...)
    saturation: int = Field(...)
    engine: str = Field(...)
    transfer_light_a: str = Field(...)
    transfer_light_b: str = Field(...)
    fixed_generation: bool = Field(...)


class ImageRelightRequest(BaseModel):
    image: str = Field(...)
    prompt: str | None = Field(None)
    transfer_light_from_reference_image: str | None = Field(None)
    light_transfer_strength: int = Field(...)
    interpolate_from_original: bool = Field(...)
    change_background: bool = Field(...)
    style: str = Field(...)
    preserve_details: bool = Field(...)
    advanced_settings: ImageRelightAdvancedSettingsRequest | None = Field(...)


class ImageStyleTransferRequest(BaseModel):
    image: str = Field(...)
    reference_image: str = Field(...)
    prompt: str | None = Field(None)
    style_strength: int = Field(...)
    structure_strength: int = Field(...)
    is_portrait: bool = Field(...)
    portrait_style: str | None = Field(...)
    portrait_beautifier: str | None = Field(...)
    flavor: str = Field(...)
    engine: str = Field(...)
    fixed_generation: bool = Field(...)


class ImageSkinEnhancerCreativeRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)


class ImageSkinEnhancerFaithfulRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    skin_detail: int = Field(...)


class ImageSkinEnhancerFlexibleRequest(BaseModel):
    image: str = Field(...)
    sharpen: int = Field(...)
    smart_grain: int = Field(...)
    optimized_for: str = Field(...)


class TaskResponse(BaseModel):
    """Unified response model that handles both wrapped and unwrapped API responses."""

    task_id: str = Field(...)
    status: str = Field(validation_alias=AliasChoices("status", "task_status"))
    generated: list[str] | None = Field(None)

    @model_validator(mode="before")
    @classmethod
    def unwrap_data(cls, values: dict) -> dict:
        if "data" in values and isinstance(values["data"], dict):
            return values["data"]
        return values
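The `TaskResponse` validator above strips an optional `data` envelope before normal field validation, and `AliasChoices` lets `status` come from either key. A small sketch with made-up payloads (the field values are illustrative only):

```python
from comfy_api_nodes.apis.magnific import TaskResponse

wrapped = {"data": {"task_id": "task-123", "task_status": "IN_PROGRESS"}}
plain = {"task_id": "task-123", "status": "COMPLETED", "generated": ["https://example.com/out.png"]}

print(TaskResponse.model_validate(wrapped).status)  # "IN_PROGRESS" (via the task_status alias)
print(TaskResponse.model_validate(plain).status)    # "COMPLETED"
```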
@@ -9,7 +9,6 @@ from comfy_api_nodes.apis.bytedance import (
     RECOMMENDED_PRESETS,
     RECOMMENDED_PRESETS_SEEDREAM_4,
     VIDEO_TASKS_EXECUTION_TIME,
-    Image2ImageTaskCreationRequest,
     Image2VideoTaskCreationRequest,
     ImageTaskCreationResponse,
     Seedream4Options,
@@ -174,99 +173,6 @@ class ByteDanceImageNode(IO.ComfyNode):
         return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


-class ByteDanceImageEditNode(IO.ComfyNode):
-
-    @classmethod
-    def define_schema(cls):
-        return IO.Schema(
-            node_id="ByteDanceImageEditNode",
-            display_name="ByteDance Image Edit",
-            category="api node/image/ByteDance",
-            description="Edit images using ByteDance models via api based on prompt",
-            inputs=[
-                IO.Combo.Input("model", options=["seededit-3-0-i2i-250628"]),
-                IO.Image.Input(
-                    "image",
-                    tooltip="The base image to edit",
-                ),
-                IO.String.Input(
-                    "prompt",
-                    multiline=True,
-                    default="",
-                    tooltip="Instruction to edit image",
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=2147483647,
-                    step=1,
-                    display_mode=IO.NumberDisplay.number,
-                    control_after_generate=True,
-                    tooltip="Seed to use for generation",
-                    optional=True,
-                ),
-                IO.Float.Input(
-                    "guidance_scale",
-                    default=5.5,
-                    min=1.0,
-                    max=10.0,
-                    step=0.01,
-                    display_mode=IO.NumberDisplay.number,
-                    tooltip="Higher value makes the image follow the prompt more closely",
-                    optional=True,
-                ),
-                IO.Boolean.Input(
-                    "watermark",
-                    default=False,
-                    tooltip='Whether to add an "AI generated" watermark to the image',
-                    optional=True,
-                ),
-            ],
-            outputs=[
-                IO.Image.Output(),
-            ],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-            is_deprecated=True,
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        model: str,
-        image: Input.Image,
-        prompt: str,
-        seed: int,
-        guidance_scale: float,
-        watermark: bool,
-    ) -> IO.NodeOutput:
-        validate_string(prompt, strip_whitespace=True, min_length=1)
-        if get_number_of_images(image) != 1:
-            raise ValueError("Exactly one input image is required.")
-        validate_image_aspect_ratio(image, (1, 3), (3, 1))
-        source_url = (await upload_images_to_comfyapi(cls, image, max_images=1, mime_type="image/png"))[0]
-        payload = Image2ImageTaskCreationRequest(
-            model=model,
-            prompt=prompt,
-            image=source_url,
-            seed=seed,
-            guidance_scale=guidance_scale,
-            watermark=watermark,
-        )
-        response = await sync_op(
-            cls,
-            ApiEndpoint(path=BYTEPLUS_IMAGE_ENDPOINT, method="POST"),
-            data=payload,
-            response_model=ImageTaskCreationResponse,
-        )
-        return IO.NodeOutput(await download_url_to_image_tensor(get_image_url_from_response(response)))


 class ByteDanceSeedreamNode(IO.ComfyNode):

     @classmethod
@@ -1101,7 +1007,6 @@ class ByteDanceExtension(ComfyExtension):
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             ByteDanceImageNode,
-            ByteDanceImageEditNode,
             ByteDanceSeedreamNode,
             ByteDanceTextToVideoNode,
             ByteDanceImageToVideoNode,
889 comfy_api_nodes/nodes_magnific.py Normal file

@@ -0,0 +1,889 @@
|
||||
import math
|
||||
|
||||
from typing_extensions import override
|
||||
|
||||
from comfy_api.latest import IO, ComfyExtension, Input
|
||||
from comfy_api_nodes.apis.magnific import (
|
||||
ImageRelightAdvancedSettingsRequest,
|
||||
ImageRelightRequest,
|
||||
ImageSkinEnhancerCreativeRequest,
|
||||
ImageSkinEnhancerFaithfulRequest,
|
||||
ImageSkinEnhancerFlexibleRequest,
|
||||
ImageStyleTransferRequest,
|
||||
ImageUpscalerCreativeRequest,
|
||||
ImageUpscalerPrecisionV2Request,
|
||||
InputAdvancedSettings,
|
||||
InputPortraitMode,
|
||||
InputSkinEnhancerMode,
|
||||
TaskResponse,
|
||||
)
|
||||
from comfy_api_nodes.util import (
|
||||
ApiEndpoint,
|
||||
download_url_to_image_tensor,
|
||||
downscale_image_tensor,
|
||||
get_image_dimensions,
|
||||
get_number_of_images,
|
||||
poll_op,
|
||||
sync_op,
|
||||
upload_images_to_comfyapi,
|
||||
validate_image_aspect_ratio,
|
||||
validate_image_dimensions,
|
||||
)
|
||||
|
||||
|
||||
class MagnificImageUpscalerCreativeNode(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="MagnificImageUpscalerCreativeNode",
|
||||
display_name="Magnific Image Upscale (Creative)",
|
||||
category="api node/image/Magnific",
|
||||
description="Prompt‑guided enhancement, stylization, and 2x/4x/8x/16x upscaling. "
|
||||
"Maximum output: 25.3 megapixels.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.String.Input("prompt", multiline=True, default=""),
|
||||
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
|
||||
IO.Combo.Input(
|
||||
"optimized_for",
|
||||
options=[
|
||||
"standard",
|
||||
"soft_portraits",
|
||||
"hard_portraits",
|
||||
"art_n_illustration",
|
||||
"videogame_assets",
|
||||
"nature_n_landscapes",
|
||||
"films_n_photography",
|
||||
"3d_renders",
|
||||
"science_fiction_n_horror",
|
||||
],
|
||||
),
|
||||
IO.Int.Input("creativity", min=-10, max=10, default=0, display_mode=IO.NumberDisplay.slider),
|
||||
IO.Int.Input(
|
||||
"hdr",
|
||||
min=-10,
|
||||
max=10,
|
||||
default=0,
|
||||
tooltip="The level of definition and detail.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"resemblance",
|
||||
min=-10,
|
||||
max=10,
|
||||
default=0,
|
||||
tooltip="The level of resemblance to the original image.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"fractality",
|
||||
min=-10,
|
||||
max=10,
|
||||
default=0,
|
||||
tooltip="The strength of the prompt and intricacy per square pixel.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"engine",
|
||||
options=["automatic", "magnific_illusio", "magnific_sharpy", "magnific_sparkle"],
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"auto_downscale",
|
||||
default=False,
|
||||
tooltip="Automatically downscale input image if output would exceed maximum pixel limit.",
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
|
||||
expr="""
|
||||
(
|
||||
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
|
||||
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
image: Input.Image,
|
||||
prompt: str,
|
||||
scale_factor: str,
|
||||
optimized_for: str,
|
||||
creativity: int,
|
||||
hdr: int,
|
||||
resemblance: int,
|
||||
fractality: int,
|
||||
engine: str,
|
||||
auto_downscale: bool,
|
||||
) -> IO.NodeOutput:
|
||||
if get_number_of_images(image) != 1:
|
||||
raise ValueError("Exactly one input image is required.")
|
||||
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(image, min_height=160, min_width=160)
|
||||
|
||||
max_output_pixels = 25_300_000
|
||||
height, width = get_image_dimensions(image)
|
||||
requested_scale = int(scale_factor.rstrip("x"))
|
||||
output_pixels = height * width * requested_scale * requested_scale
|
||||
|
||||
if output_pixels > max_output_pixels:
|
||||
if auto_downscale:
|
||||
# Find optimal scale factor that doesn't require >2x downscale.
|
||||
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
|
||||
input_pixels = width * height
|
||||
scale = 2
|
||||
max_input_pixels = max_output_pixels // 4
|
||||
for candidate in [16, 8, 4, 2]:
|
||||
if candidate > requested_scale:
|
||||
continue
|
||||
scale_output_pixels = input_pixels * candidate * candidate
|
||||
if scale_output_pixels <= max_output_pixels:
|
||||
scale = candidate
|
||||
max_input_pixels = None
|
||||
break
|
||||
downscale_ratio = math.sqrt(scale_output_pixels / max_output_pixels)
|
||||
if downscale_ratio <= 2.0:
|
||||
scale = candidate
|
||||
max_input_pixels = max_output_pixels // (candidate * candidate)
|
||||
break
|
||||
|
||||
if max_input_pixels is not None:
|
||||
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
|
||||
scale_factor = f"{scale}x"
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Output size ({width * requested_scale}x{height * requested_scale} = {output_pixels:,} pixels) "
|
||||
f"exceeds maximum allowed size of {max_output_pixels:,} pixels. "
|
||||
f"Use a smaller input image or lower scale factor."
|
||||
)
|
||||
|
||||
initial_res = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler", method="POST"),
|
||||
response_model=TaskResponse,
|
||||
data=ImageUpscalerCreativeRequest(
|
||||
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
|
||||
scale_factor=scale_factor,
|
||||
optimized_for=optimized_for,
|
||||
creativity=creativity,
|
||||
hdr=hdr,
|
||||
resemblance=resemblance,
|
||||
fractality=fractality,
|
||||
engine=engine,
|
||||
prompt=prompt if prompt else None,
|
||||
),
|
||||
)
|
||||
final_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler/{initial_res.task_id}"),
|
||||
response_model=TaskResponse,
|
||||
status_extractor=lambda x: x.status,
|
||||
poll_interval=10.0,
|
||||
max_poll_attempts=480,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
|
||||
|
||||
|
||||
class MagnificImageUpscalerPreciseV2Node(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="MagnificImageUpscalerPreciseV2Node",
|
||||
display_name="Magnific Image Upscale (Precise V2)",
|
||||
category="api node/image/Magnific",
|
||||
description="High-fidelity upscaling with fine control over sharpness, grain, and detail. "
|
||||
"Maximum output: 10060×10060 pixels.",
|
||||
inputs=[
|
||||
IO.Image.Input("image"),
|
||||
IO.Combo.Input("scale_factor", options=["2x", "4x", "8x", "16x"]),
|
||||
IO.Combo.Input(
|
||||
"flavor",
|
||||
options=["sublime", "photo", "photo_denoiser"],
|
||||
tooltip="Processing style: "
|
||||
"sublime for general use, photo for photographs, photo_denoiser for noisy photos.",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"sharpen",
|
||||
min=0,
|
||||
max=100,
|
||||
default=7,
|
||||
tooltip="Image sharpness intensity. Higher values increase edge definition and clarity.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"smart_grain",
|
||||
min=0,
|
||||
max=100,
|
||||
default=7,
|
||||
tooltip="Intelligent grain/texture enhancement to prevent the image from "
|
||||
"looking too smooth or artificial.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"ultra_detail",
|
||||
min=0,
|
||||
max=100,
|
||||
default=30,
|
||||
tooltip="Controls fine detail, textures, and micro-details added during upscaling.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"auto_downscale",
|
||||
default=False,
|
||||
tooltip="Automatically downscale input image if output would exceed maximum resolution.",
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["scale_factor"]),
|
||||
expr="""
|
||||
(
|
||||
$max := widgets.scale_factor = "2x" ? 1.326 : 1.657;
|
||||
{"type": "range_usd", "min_usd": 0.11, "max_usd": $max}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
image: Input.Image,
|
||||
scale_factor: str,
|
||||
flavor: str,
|
||||
sharpen: int,
|
||||
smart_grain: int,
|
||||
ultra_detail: int,
|
||||
auto_downscale: bool,
|
||||
) -> IO.NodeOutput:
|
||||
if get_number_of_images(image) != 1:
|
||||
raise ValueError("Exactly one input image is required.")
|
||||
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(image, min_height=160, min_width=160)
|
||||
|
||||
max_output_dimension = 10060
|
||||
height, width = get_image_dimensions(image)
|
||||
requested_scale = int(scale_factor.strip("x"))
|
||||
output_width = width * requested_scale
|
||||
output_height = height * requested_scale
|
||||
|
||||
if output_width > max_output_dimension or output_height > max_output_dimension:
|
||||
if auto_downscale:
|
||||
# Find optimal scale factor that doesn't require >2x downscale.
|
||||
# Server upscales in 2x steps, so aggressive downscaling degrades quality.
|
||||
max_dim = max(width, height)
|
||||
scale = 2
|
||||
max_input_dim = max_output_dimension // 2
|
||||
scale_ratio = max_input_dim / max_dim
|
||||
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
|
||||
for candidate in [16, 8, 4, 2]:
|
||||
if candidate > requested_scale:
|
||||
continue
|
||||
output_dim = max_dim * candidate
|
||||
if output_dim <= max_output_dimension:
|
||||
scale = candidate
|
||||
max_input_pixels = None
|
||||
break
|
||||
downscale_ratio = output_dim / max_output_dimension
|
||||
if downscale_ratio <= 2.0:
|
||||
scale = candidate
|
||||
max_input_dim = max_output_dimension // candidate
|
||||
scale_ratio = max_input_dim / max_dim
|
||||
max_input_pixels = int(width * height * scale_ratio * scale_ratio)
|
||||
break
|
||||
|
||||
if max_input_pixels is not None:
|
||||
image = downscale_image_tensor(image, total_pixels=max_input_pixels)
|
||||
requested_scale = scale
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Output dimensions ({output_width}x{output_height}) exceed maximum allowed "
|
||||
f"resolution of {max_output_dimension}x{max_output_dimension} pixels. "
|
||||
f"Use a smaller input image or lower scale factor."
|
||||
)
|
||||
|
||||
initial_res = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/freepik/v1/ai/image-upscaler-precision-v2", method="POST"),
|
||||
response_model=TaskResponse,
|
||||
data=ImageUpscalerPrecisionV2Request(
|
||||
image=(await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=None))[0],
|
||||
scale_factor=requested_scale,
|
||||
flavor=flavor,
|
||||
sharpen=sharpen,
|
||||
smart_grain=smart_grain,
|
||||
ultra_detail=ultra_detail,
|
||||
),
|
||||
)
|
||||
final_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-upscaler-precision-v2/{initial_res.task_id}"),
|
||||
response_model=TaskResponse,
|
||||
status_extractor=lambda x: x.status,
|
||||
poll_interval=10.0,
|
||||
max_poll_attempts=480,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
|
||||
|
||||
|
||||
class MagnificImageStyleTransferNode(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="MagnificImageStyleTransferNode",
|
||||
display_name="Magnific Image Style Transfer",
|
||||
category="api node/image/Magnific",
|
||||
description="Transfer the style from a reference image to your input image.",
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to apply style transfer to."),
|
||||
IO.Image.Input("reference_image", tooltip="The reference image to extract style from."),
|
||||
IO.String.Input("prompt", multiline=True, default=""),
|
||||
IO.Int.Input(
|
||||
"style_strength",
|
||||
min=0,
|
||||
max=100,
|
||||
default=100,
|
||||
tooltip="Percentage of style strength.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"structure_strength",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Maintains the structure of the original image.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"flavor",
|
||||
options=["faithful", "gen_z", "psychedelia", "detaily", "clear", "donotstyle", "donotstyle_sharp"],
|
||||
tooltip="Style transfer flavor.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"engine",
|
||||
options=[
|
||||
"balanced",
|
||||
"definio",
|
||||
"illusio",
|
||||
"3d_cartoon",
|
||||
"colorful_anime",
|
||||
"caricature",
|
||||
"real",
|
||||
"super_real",
|
||||
"softy",
|
||||
],
|
||||
tooltip="Processing engine selection.",
|
||||
),
|
||||
IO.DynamicCombo.Input(
|
||||
"portrait_mode",
|
||||
options=[
|
||||
IO.DynamicCombo.Option("disabled", []),
|
||||
IO.DynamicCombo.Option(
|
||||
"enabled",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"portrait_style",
|
||||
options=["standard", "pop", "super_pop"],
|
||||
tooltip="Visual style applied to portrait images.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"portrait_beautifier",
|
||||
options=["none", "beautify_face", "beautify_face_max"],
|
||||
tooltip="Facial beautification intensity on portraits.",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Enable portrait mode for facial enhancements.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"fixed_generation",
|
||||
default=True,
|
||||
tooltip="When disabled, expect each generation to introduce a degree of randomness, "
|
||||
"leading to more diverse outcomes.",
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.11}""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
image: Input.Image,
|
||||
reference_image: Input.Image,
|
||||
prompt: str,
|
||||
style_strength: int,
|
||||
structure_strength: int,
|
||||
flavor: str,
|
||||
engine: str,
|
||||
portrait_mode: InputPortraitMode,
|
||||
fixed_generation: bool,
|
||||
) -> IO.NodeOutput:
|
||||
if get_number_of_images(image) != 1:
|
||||
raise ValueError("Exactly one input image is required.")
|
||||
if get_number_of_images(reference_image) != 1:
|
||||
raise ValueError("Exactly one reference image is required.")
|
||||
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(image, min_height=160, min_width=160)
|
||||
validate_image_dimensions(reference_image, min_height=160, min_width=160)
|
||||
|
||||
is_portrait = portrait_mode["portrait_mode"] == "enabled"
|
||||
portrait_style = portrait_mode.get("portrait_style", "standard")
|
||||
portrait_beautifier = portrait_mode.get("portrait_beautifier", "none")
|
||||
|
||||
uploaded_urls = await upload_images_to_comfyapi(cls, [image, reference_image], max_images=2)
|
||||
|
||||
initial_res = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/freepik/v1/ai/image-style-transfer", method="POST"),
|
||||
response_model=TaskResponse,
|
||||
data=ImageStyleTransferRequest(
|
||||
image=uploaded_urls[0],
|
||||
reference_image=uploaded_urls[1],
|
||||
prompt=prompt if prompt else None,
|
||||
style_strength=style_strength,
|
||||
structure_strength=structure_strength,
|
||||
is_portrait=is_portrait,
|
||||
portrait_style=portrait_style if is_portrait else None,
|
||||
portrait_beautifier=portrait_beautifier if is_portrait and portrait_beautifier != "none" else None,
|
||||
flavor=flavor,
|
||||
engine=engine,
|
||||
fixed_generation=fixed_generation,
|
||||
),
|
||||
)
|
||||
final_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-style-transfer/{initial_res.task_id}"),
|
||||
response_model=TaskResponse,
|
||||
status_extractor=lambda x: x.status,
|
||||
poll_interval=10.0,
|
||||
max_poll_attempts=480,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
|
||||
|
||||
|
||||
class MagnificImageRelightNode(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="MagnificImageRelightNode",
|
||||
display_name="Magnific Image Relight",
|
||||
category="api node/image/Magnific",
|
||||
description="Relight an image with lighting adjustments and optional reference-based light transfer.",
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The image to relight."),
|
||||
IO.String.Input(
|
||||
"prompt",
|
||||
multiline=True,
|
||||
default="",
|
||||
tooltip="Descriptive guidance for lighting. Supports emphasis notation (1-1.4).",
|
||||
),
|
||||
IO.Int.Input(
|
||||
"light_transfer_strength",
|
||||
min=0,
|
||||
max=100,
|
||||
default=100,
|
||||
tooltip="Intensity of light transfer application.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"style",
|
||||
options=[
|
||||
"standard",
|
||||
"darker_but_realistic",
|
||||
"clean",
|
||||
"smooth",
|
||||
"brighter",
|
||||
"contrasted_n_hdr",
|
||||
"just_composition",
|
||||
],
|
||||
tooltip="Stylistic output preference.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"interpolate_from_original",
|
||||
default=False,
|
||||
tooltip="Restricts generation freedom to match original more closely.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"change_background",
|
||||
default=True,
|
||||
tooltip="Modifies background based on prompt/reference.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"preserve_details",
|
||||
default=True,
|
||||
tooltip="Maintains texture and fine details from original.",
|
||||
),
|
||||
IO.DynamicCombo.Input(
|
||||
"advanced_settings",
|
||||
options=[
|
||||
IO.DynamicCombo.Option("disabled", []),
|
||||
IO.DynamicCombo.Option(
|
||||
"enabled",
|
||||
[
|
||||
IO.Int.Input(
|
||||
"whites",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Adjusts the brightest tones in the image.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"blacks",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Adjusts the darkest tones in the image.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"brightness",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Overall brightness adjustment.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"contrast",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Contrast adjustment.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"saturation",
|
||||
min=0,
|
||||
max=100,
|
||||
default=50,
|
||||
tooltip="Color saturation adjustment.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"engine",
|
||||
options=[
|
||||
"automatic",
|
||||
"balanced",
|
||||
"cool",
|
||||
"real",
|
||||
"illusio",
|
||||
"fairy",
|
||||
"colorful_anime",
|
||||
"hard_transform",
|
||||
"softy",
|
||||
],
|
||||
tooltip="Processing engine selection.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"transfer_light_a",
|
||||
options=["automatic", "low", "medium", "normal", "high", "high_on_faces"],
|
||||
tooltip="The intensity of light transfer.",
|
||||
),
|
||||
IO.Combo.Input(
|
||||
"transfer_light_b",
|
||||
options=[
|
||||
"automatic",
|
||||
"composition",
|
||||
"straight",
|
||||
"smooth_in",
|
||||
"smooth_out",
|
||||
"smooth_both",
|
||||
"reverse_both",
|
||||
"soft_in",
|
||||
"soft_out",
|
||||
"soft_mid",
|
||||
# "strong_mid", # Commented out because requests fail when this is set.
|
||||
"style_shift",
|
||||
"strong_shift",
|
||||
],
|
||||
tooltip="Also modifies light transfer intensity. "
|
||||
"Can be combined with the previous control for varied effects.",
|
||||
),
|
||||
IO.Boolean.Input(
|
||||
"fixed_generation",
|
||||
default=True,
|
||||
tooltip="Ensures consistent output with the same settings.",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Fine-tuning options for advanced lighting control.",
|
||||
),
|
||||
IO.Image.Input(
|
||||
"reference_image",
|
||||
optional=True,
|
||||
tooltip="Optional reference image to transfer lighting from.",
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
expr="""{"type":"usd","usd":0.11}""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
image: Input.Image,
|
||||
prompt: str,
|
||||
light_transfer_strength: int,
|
||||
style: str,
|
||||
interpolate_from_original: bool,
|
||||
change_background: bool,
|
||||
preserve_details: bool,
|
||||
advanced_settings: InputAdvancedSettings,
|
||||
reference_image: Input.Image | None = None,
|
||||
) -> IO.NodeOutput:
|
||||
if get_number_of_images(image) != 1:
|
||||
raise ValueError("Exactly one input image is required.")
|
||||
if reference_image is not None and get_number_of_images(reference_image) != 1:
|
||||
raise ValueError("Exactly one reference image is required.")
|
||||
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(image, min_height=160, min_width=160)
|
||||
if reference_image is not None:
|
||||
validate_image_aspect_ratio(reference_image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(reference_image, min_height=160, min_width=160)
|
||||
|
||||
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1))[0]
|
||||
reference_url = None
|
||||
if reference_image is not None:
|
||||
reference_url = (await upload_images_to_comfyapi(cls, reference_image, max_images=1))[0]
|
||||
|
||||
adv_settings = None
|
||||
if advanced_settings["advanced_settings"] == "enabled":
|
||||
adv_settings = ImageRelightAdvancedSettingsRequest(
|
||||
whites=advanced_settings["whites"],
|
||||
blacks=advanced_settings["blacks"],
|
||||
brightness=advanced_settings["brightness"],
|
||||
contrast=advanced_settings["contrast"],
|
||||
saturation=advanced_settings["saturation"],
|
||||
engine=advanced_settings["engine"],
|
||||
transfer_light_a=advanced_settings["transfer_light_a"],
|
||||
transfer_light_b=advanced_settings["transfer_light_b"],
|
||||
fixed_generation=advanced_settings["fixed_generation"],
|
||||
)
|
||||
|
||||
initial_res = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path="/proxy/freepik/v1/ai/image-relight", method="POST"),
|
||||
response_model=TaskResponse,
|
||||
data=ImageRelightRequest(
|
||||
image=image_url,
|
||||
prompt=prompt if prompt else None,
|
||||
transfer_light_from_reference_image=reference_url,
|
||||
light_transfer_strength=light_transfer_strength,
|
||||
interpolate_from_original=interpolate_from_original,
|
||||
change_background=change_background,
|
||||
style=style,
|
||||
preserve_details=preserve_details,
|
||||
advanced_settings=adv_settings,
|
||||
),
|
||||
)
|
||||
final_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/image-relight/{initial_res.task_id}"),
|
||||
response_model=TaskResponse,
|
||||
status_extractor=lambda x: x.status,
|
||||
poll_interval=10.0,
|
||||
max_poll_attempts=480,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
|
||||
|
||||
|
||||
class MagnificImageSkinEnhancerNode(IO.ComfyNode):
|
||||
@classmethod
|
||||
def define_schema(cls):
|
||||
return IO.Schema(
|
||||
node_id="MagnificImageSkinEnhancerNode",
|
||||
display_name="Magnific Image Skin Enhancer",
|
||||
category="api node/image/Magnific",
|
||||
description="Skin enhancement for portraits with multiple processing modes.",
|
||||
inputs=[
|
||||
IO.Image.Input("image", tooltip="The portrait image to enhance."),
|
||||
IO.Int.Input(
|
||||
"sharpen",
|
||||
min=0,
|
||||
max=100,
|
||||
default=0,
|
||||
tooltip="Sharpening intensity level.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.Int.Input(
|
||||
"smart_grain",
|
||||
min=0,
|
||||
max=100,
|
||||
default=2,
|
||||
tooltip="Smart grain intensity level.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
IO.DynamicCombo.Input(
|
||||
"mode",
|
||||
options=[
|
||||
IO.DynamicCombo.Option("creative", []),
|
||||
IO.DynamicCombo.Option(
|
||||
"faithful",
|
||||
[
|
||||
IO.Int.Input(
|
||||
"skin_detail",
|
||||
min=0,
|
||||
max=100,
|
||||
default=80,
|
||||
tooltip="Skin detail enhancement level.",
|
||||
display_mode=IO.NumberDisplay.slider,
|
||||
),
|
||||
],
|
||||
),
|
||||
IO.DynamicCombo.Option(
|
||||
"flexible",
|
||||
[
|
||||
IO.Combo.Input(
|
||||
"optimized_for",
|
||||
options=[
|
||||
"enhance_skin",
|
||||
"improve_lighting",
|
||||
"enhance_everything",
|
||||
"transform_to_real",
|
||||
"no_make_up",
|
||||
],
|
||||
tooltip="Enhancement optimization target.",
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
tooltip="Processing mode: creative for artistic enhancement, "
|
||||
"faithful for preserving original appearance, "
|
||||
"flexible for targeted optimization.",
|
||||
),
|
||||
],
|
||||
outputs=[
|
||||
IO.Image.Output(),
|
||||
],
|
||||
hidden=[
|
||||
IO.Hidden.auth_token_comfy_org,
|
||||
IO.Hidden.api_key_comfy_org,
|
||||
IO.Hidden.unique_id,
|
||||
],
|
||||
is_api_node=True,
|
||||
price_badge=IO.PriceBadge(
|
||||
depends_on=IO.PriceBadgeDepends(widgets=["mode"]),
|
||||
expr="""
|
||||
(
|
||||
$rates := {"creative": 0.29, "faithful": 0.37, "flexible": 0.45};
|
||||
{"type":"usd","usd": $lookup($rates, widgets.mode)}
|
||||
)
|
||||
""",
|
||||
),
|
||||
)
|
||||
|
||||
@classmethod
|
||||
async def execute(
|
||||
cls,
|
||||
image: Input.Image,
|
||||
sharpen: int,
|
||||
smart_grain: int,
|
||||
mode: InputSkinEnhancerMode,
|
||||
) -> IO.NodeOutput:
|
||||
if get_number_of_images(image) != 1:
|
||||
raise ValueError("Exactly one input image is required.")
|
||||
validate_image_aspect_ratio(image, (1, 3), (3, 1), strict=False)
|
||||
validate_image_dimensions(image, min_height=160, min_width=160)
|
||||
|
||||
image_url = (await upload_images_to_comfyapi(cls, image, max_images=1, total_pixels=4096 * 4096))[0]
|
||||
selected_mode = mode["mode"]
|
||||
|
||||
if selected_mode == "creative":
|
||||
endpoint = "creative"
|
||||
data = ImageSkinEnhancerCreativeRequest(
|
||||
image=image_url,
|
||||
sharpen=sharpen,
|
||||
smart_grain=smart_grain,
|
||||
)
|
||||
elif selected_mode == "faithful":
|
||||
endpoint = "faithful"
|
||||
data = ImageSkinEnhancerFaithfulRequest(
|
||||
image=image_url,
|
||||
sharpen=sharpen,
|
||||
smart_grain=smart_grain,
|
||||
skin_detail=mode["skin_detail"],
|
||||
)
|
||||
else: # flexible
|
||||
endpoint = "flexible"
|
||||
data = ImageSkinEnhancerFlexibleRequest(
|
||||
image=image_url,
|
||||
sharpen=sharpen,
|
||||
smart_grain=smart_grain,
|
||||
optimized_for=mode["optimized_for"],
|
||||
)
|
||||
|
||||
initial_res = await sync_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{endpoint}", method="POST"),
|
||||
response_model=TaskResponse,
|
||||
data=data,
|
||||
)
|
||||
final_response = await poll_op(
|
||||
cls,
|
||||
ApiEndpoint(path=f"/proxy/freepik/v1/ai/skin-enhancer/{initial_res.task_id}"),
|
||||
response_model=TaskResponse,
|
||||
status_extractor=lambda x: x.status,
|
||||
poll_interval=10.0,
|
||||
max_poll_attempts=480,
|
||||
)
|
||||
return IO.NodeOutput(await download_url_to_image_tensor(final_response.generated[0]))
|
||||
|
||||
|
||||
class MagnificExtension(ComfyExtension):
|
||||
@override
|
||||
async def get_node_list(self) -> list[type[IO.ComfyNode]]:
|
||||
return [
|
||||
# MagnificImageUpscalerCreativeNode,
|
||||
# MagnificImageUpscalerPreciseV2Node,
|
||||
MagnificImageStyleTransferNode,
|
||||
MagnificImageRelightNode,
|
||||
MagnificImageSkinEnhancerNode,
|
||||
]
|
||||
|
||||
|
||||
async def comfy_entrypoint() -> MagnificExtension:
|
||||
return MagnificExtension()
|
||||
@@ -56,15 +56,14 @@ def image_tensor_pair_to_batch(image1: torch.Tensor, image2: torch.Tensor) -> to
 def tensor_to_bytesio(
     image: torch.Tensor,
     *,
-    total_pixels: int = 2048 * 2048,
+    total_pixels: int | None = 2048 * 2048,
     mime_type: str = "image/png",
 ) -> BytesIO:
     """Converts a torch.Tensor image to a named BytesIO object.

     Args:
         image: Input torch.Tensor image.
         name: Optional filename for the BytesIO object.
-        total_pixels: Maximum total pixels for potential downscaling.
+        total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
         mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').

     Returns:
@@ -79,13 +78,14 @@ def tensor_to_bytesio(
     return img_binary


-def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image.Image:
+def tensor_to_pil(image: torch.Tensor, total_pixels: int | None = 2048 * 2048) -> Image.Image:
     """Converts a single torch.Tensor image [H, W, C] to a PIL Image, optionally downscaling."""
     if len(image.shape) > 3:
         image = image[0]
     # TODO: remove alpha if not allowed and present
     input_tensor = image.cpu()
-    input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
+    if total_pixels is not None:
+        input_tensor = downscale_image_tensor(input_tensor.unsqueeze(0), total_pixels=total_pixels).squeeze()
     image_np = (input_tensor.numpy() * 255).astype(np.uint8)
     img = Image.fromarray(image_np)
     return img
@@ -93,14 +93,14 @@ def tensor_to_pil(image: torch.Tensor, total_pixels: int = 2048 * 2048) -> Image

 def tensor_to_base64_string(
     image_tensor: torch.Tensor,
-    total_pixels: int = 2048 * 2048,
+    total_pixels: int | None = 2048 * 2048,
     mime_type: str = "image/png",
 ) -> str:
     """Convert [B, H, W, C] or [H, W, C] tensor to a base64 string.

     Args:
         image_tensor: Input torch.Tensor image.
-        total_pixels: Maximum total pixels for potential downscaling.
+        total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
         mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp', 'video/mp4').

     Returns:
@@ -161,14 +161,14 @@ def downscale_image_tensor_by_max_side(image: torch.Tensor, *, max_side: int) -

 def tensor_to_data_uri(
     image_tensor: torch.Tensor,
-    total_pixels: int = 2048 * 2048,
+    total_pixels: int | None = 2048 * 2048,
     mime_type: str = "image/png",
 ) -> str:
     """Converts a tensor image to a Data URI string.

     Args:
         image_tensor: Input torch.Tensor image.
-        total_pixels: Maximum total pixels for potential downscaling.
+        total_pixels: Maximum total pixels for downscaling. If None, no downscaling is performed.
         mime_type: Target image MIME type (e.g., 'image/png', 'image/jpeg', 'image/webp').

     Returns:
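With the widened signatures above, callers can now opt out of downscaling entirely by passing `total_pixels=None`. A minimal sketch (the dummy tensor size is arbitrary, and it assumes `tensor_to_pil` as defined above is in scope):

```python
import torch

img = torch.rand(2160, 3840, 3)                    # [H, W, C] dummy image tensor
pil_capped = tensor_to_pil(img)                    # default: downscaled to fit within 2048*2048 pixels
pil_full = tensor_to_pil(img, total_pixels=None)   # new behavior: downscaling step skipped entirely
```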
@@ -49,7 +49,7 @@ async def upload_images_to_comfyapi(
     mime_type: str | None = None,
     wait_label: str | None = "Uploading",
     show_batch_index: bool = True,
-    total_pixels: int = 2048 * 2048,
+    total_pixels: int | None = 2048 * 2048,
 ) -> list[str]:
     """
     Uploads images to ComfyUI API and returns download URLs.
@@ -701,7 +701,14 @@ class Noise_EmptyNoise:

     def generate_noise(self, input_latent):
         latent_image = input_latent["samples"]
-        return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+        if latent_image.is_nested:
+            tensors = latent_image.unbind()
+            zeros = []
+            for t in tensors:
+                zeros.append(torch.zeros(t.shape, dtype=t.dtype, layout=t.layout, device="cpu"))
+            return comfy.nested_tensor.NestedTensor(zeros)
+        else:
+            return torch.zeros(latent_image.shape, dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")


 class Noise_RandomNoise:
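The new branch exists because a nested latent holds several tensors of different shapes, so a single `torch.zeros(latent_image.shape, ...)` call cannot describe it; each component gets its own zero tensor instead. A standalone sketch of the same idea (plain lists, not ComfyUI's `NestedTensor` wrapper):

```python
import torch

components = [torch.randn(1, 4, 8, 8), torch.randn(1, 4, 16, 16)]        # differently shaped latents
zeros = [torch.zeros(t.shape, dtype=t.dtype, device="cpu") for t in components]
assert all(z.shape == t.shape for z, t in zip(zeros, components))
```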
@@ -223,11 +223,24 @@ class LTXVAddGuide(io.ComfyNode):
         return frame_idx, latent_idx

     @classmethod
-    def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors):
+    def add_keyframe_index(cls, cond, frame_idx, guiding_latent, scale_factors, latent_downscale_factor=1):
         keyframe_idxs, _ = get_keyframe_idxs(cond)
         _, latent_coords = cls.PATCHIFIER.patchify(guiding_latent)
         pixel_coords = latent_to_pixel_coords(latent_coords, scale_factors, causal_fix=frame_idx == 0)  # we need the causal fix only if we're placing the new latents at index 0
         pixel_coords[:, 0] += frame_idx
+
+        # The following adjusts keyframe end positions for small grid IC-LoRA.
+        # After dilation, the small grid has the same size and position as the large grid,
+        # but each token encodes a larger image patch. We adjust the end position (not start)
+        # so that RoPE represents the correct middle point of each token.
+        # keyframe_idxs dims: (batch, spatial_dim [t,h,w], token_id, [start, end])
+        # We only adjust h,w (not t) in dim 1, and only end (not start) in dim 3.
+        spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(
+            scale_factors[1:],
+            device=pixel_coords.device,
+        ).view(1, -1, 1, 1)
+        pixel_coords[:, 1:, :, 1:] += spatial_end_offset.to(pixel_coords.dtype)
+
         if keyframe_idxs is None:
             keyframe_idxs = pixel_coords
         else:
@@ -235,12 +248,12 @@ class LTXVAddGuide(io.ComfyNode):
         return node_helpers.conditioning_set_values(cond, {"keyframe_idxs": keyframe_idxs})

     @classmethod
-    def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128):
+    def append_keyframe(cls, positive, negative, frame_idx, latent_image, noise_mask, guiding_latent, strength, scale_factors, guide_mask=None, in_channels=128, latent_downscale_factor=1):
         if latent_image.shape[1] != in_channels or guiding_latent.shape[1] != in_channels:
             raise ValueError("Adding guide to a combined AV latent is not supported.")

-        positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors)
-        negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors)
+        positive = cls.add_keyframe_index(positive, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)
+        negative = cls.add_keyframe_index(negative, frame_idx, guiding_latent, scale_factors, latent_downscale_factor)

         if guide_mask is not None:
             target_h = max(noise_mask.shape[3], guide_mask.shape[3])
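As a concrete illustration of the end-position adjustment introduced above (the scale factors and downscale factor used here are made-up placeholder values, not taken from a specific LTXV model):

```python
import torch

latent_downscale_factor = 2          # assumed small-grid dilation factor
scale_factors = (8, 32, 32)          # assumed (t, h, w) latent-to-pixel factors
spatial_end_offset = (latent_downscale_factor - 1) * torch.tensor(scale_factors[1:]).view(1, -1, 1, 1)
print(spatial_end_offset.flatten().tolist())  # [32, 32]: each h/w token end moves one extra patch outward
```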
46 compose.yaml Normal file

@@ -0,0 +1,46 @@
# Docker Compose file to run ComfyUI locally using Docker.

services:
  comfyui:
    container_name: comfyui
    build:
      context: .
      args:
        # Declare additional system dependencies for custom nodes
        APT_EXTRA_PACKAGES:

    ports:
      - 8188:8188

    # Optional: enable GPU access for hardware acceleration.
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: [gpu]
    volumes:
      - ./custom_nodes:/comfyui/custom_nodes
      - ./models:/comfyui/models

      # (Optional) Mount host ComfyUI data directories
      #
      #- ./input:/comfyui/input
      #- ./output:/comfyui/output
      #- ./temp:/comfyui/temp
      #- ./user:/comfyui/user

    environment:
      # Overwrite the container user's UID and GID to match the host's. This
      # allows files created by ComfyUI to be mounted on the host without
      # permission issues.
      UID: 1000
      GID: 1000
      # Declare additional Python packages to install. Useful when a custom node
      # pack does not properly specify all its dependencies or relies on
      # optional dependencies.
      PIP_EXTRA_PACKAGES:

    # Optional: Override the default command. In this case, configure ComfyUI to
    # listen on all network interfaces (which is required when not using
    # `network_mode=host`.)
    command: python ./main.py --listen 0.0.0.0
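For customizations that shouldn't be committed to `compose.yaml` itself, Docker Compose automatically merges a `compose.override.yaml` placed next to it. A hedged sketch (the package names below are purely illustrative):

```yaml
# compose.override.yaml
services:
  comfyui:
    build:
      args:
        APT_EXTRA_PACKAGES: "ffmpeg libgl1"
    environment:
      PIP_EXTRA_PACKAGES: "opencv-python-headless"
```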
62 entrypoint.sh Executable file

@@ -0,0 +1,62 @@
#!/bin/sh

# Entrypoint script for the ComfyUI Docker image.

set -e

user="comfyui"
user_group="$user"

# Allow users to specify a UID and GID matching their own, so files created
# inside the container retain the same numeric ownership when mounted on the
# host.
if [ -n "$UID" ] && [ -n "$GID" ]; then
    echo "[entrypoint] Setting user UID and GID..."
    usermod -u "$UID" "$user" > /dev/null
    groupmod -g "$GID" "$user_group"
else
    echo "[entrypoint] Missing UID or GID environment variables; keeping default values."
fi

# Changing a user's UID and GID revokes that user's access to files owned by the
# original UID/GID. To preserve access to runtime data, the ownership of those
# directories must be updated recursively so that their numeric owner matches
# the user's new UID and GID.
echo "[entrypoint] Changing directory ownership..."
chown -R "$user:$user_group" \
    /comfyui \
    /home/comfyui

# To use CUDA and other NVIDIA features, regular users must belong to the group
# that owns the /dev/nvidia* device files -- typically the video group.
#
# Known issue: Because these device files are mounted from the host system,
# there's no guarantee that the device's group ID will match the intended group
# inside the container. For example, the video group might be mapped to GID 27
# on the host, which corresponds to the sudo group in the python:3.12 image.
# This shouldn't cause major problems, and given the lack of a universal
# standard for system GIDs, there isn't much we can realistically change to
# address this issue.
echo "[entrypoint] Adding user to GPU device groups..."
for dev in /dev/nvidia*; do
    group=$(ls -ld "$dev" | awk '{print $4}')
    usermod -aG "$group" "$user"
done

# Install or update the Python dependencies defined by ComfyUI (or any installed
# custom node) and also install any user-defined dependencies specified in
# PIP_EXTRA_PACKAGES.
echo "[entrypoint] Updating Python dependencies..."
su -c "
    pip install \\
        --no-cache-dir \\
        --disable-pip-version-check \\
        -r requirements.txt \\
        $(find custom_nodes -mindepth 2 -maxdepth 2 -type f -name requirements.txt -printf "-r '%p' ") \\
        $PIP_EXTRA_PACKAGES
" comfyui \
    || echo "[entrypoint] Failed to install dependencies, starting anyway" >&2

# Run command as comfyui
echo "[entrypoint] Running command"
exec su -c "$*" comfyui
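The same UID/GID handshake works without Compose as well; a hedged `docker run` sketch (the image tag `comfyui:local` and the mounted paths are assumptions, adjust them to your setup):

```shell
docker run --rm -it --gpus all \
  -p 8188:8188 \
  -e UID="$(id -u)" -e GID="$(id -g)" \
  -v "$PWD/models:/comfyui/models" \
  -v "$PWD/custom_nodes:/comfyui/custom_nodes" \
  comfyui:local
```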
@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.37.11
-comfyui-workflow-templates==0.8.15
+comfyui-workflow-templates==0.8.23
 comfyui-embedded-docs==0.4.0
 torch
 torchsde