Merge branch 'master' into dr-support-pip-cm

Dr.Lt.Data 2025-09-30 06:54:59 +09:00
commit 976cee95f8
9 changed files with 353 additions and 85 deletions

View File

@@ -0,0 +1,64 @@
+name: "Windows Release dependencies Manual"
+
+on:
+  workflow_dispatch:
+    inputs:
+      torch_dependencies:
+        description: 'torch dependencies'
+        required: false
+        type: string
+        default: "torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu128"
+      cache_tag:
+        description: 'Cached dependencies tag'
+        required: true
+        type: string
+        default: "cu128"
+      python_minor:
+        description: 'python minor version'
+        required: true
+        type: string
+        default: "12"
+      python_patch:
+        description: 'python patch version'
+        required: true
+        type: string
+        default: "10"
+
+jobs:
+  build_dependencies:
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: 3.${{ inputs.python_minor }}.${{ inputs.python_patch }}
+
+      - shell: bash
+        run: |
+          echo "@echo off
+          call update_comfyui.bat nopause
+          echo -
+          echo This will try to update pytorch and all python dependencies.
+          echo -
+          echo If you just want to update normally, close this and run update_comfyui.bat instead.
+          echo -
+          pause
+          ..\python_embeded\python.exe -s -m pip install --upgrade ${{ inputs.torch_dependencies }} -r ../ComfyUI/requirements.txt pygit2
+          pause" > update_comfyui_and_python_dependencies.bat
+
+          grep -v comfyui requirements.txt > requirements_nocomfyui.txt
+          python -m pip wheel --no-cache-dir ${{ inputs.torch_dependencies }} -r requirements_nocomfyui.txt pygit2 -w ./temp_wheel_dir
+          python -m pip install --no-cache-dir ./temp_wheel_dir/*
+          echo installed basic
+          ls -lah temp_wheel_dir
+          mv temp_wheel_dir ${{ inputs.cache_tag }}_python_deps
+          tar cf ${{ inputs.cache_tag }}_python_deps.tar ${{ inputs.cache_tag }}_python_deps
+
+      - uses: actions/cache/save@v4
+        with:
+          path: |
+            ${{ inputs.cache_tag }}_python_deps.tar
+            update_comfyui_and_python_dependencies.bat
+          key: ${{ runner.os }}-build-${{ inputs.cache_tag }}-${{ inputs.python_minor }}
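Reviewer note: the workflow is `workflow_dispatch`-only, so nothing triggers it automatically. A minimal sketch of kicking it off through the GitHub REST API; the repository and workflow file name below are assumptions, since the diff view does not show the file path:

# Sketch: trigger the manual dependencies build via the GitHub REST API.
import requests

OWNER, REPO = "<owner>", "<repo>"                      # placeholders
WORKFLOW = "windows_release_dependencies_manual.yml"   # hypothetical file name

resp = requests.post(
    f"https://api.github.com/repos/{OWNER}/{REPO}/actions/workflows/{WORKFLOW}/dispatches",
    headers={
        "Authorization": "Bearer <token with actions:write>",
        "Accept": "application/vnd.github+json",
    },
    json={
        "ref": "master",
        "inputs": {  # mirrors the workflow_dispatch inputs above
            "cache_tag": "cu128",
            "python_minor": "12",
            "python_patch": "10",
        },
    },
)
resp.raise_for_status()  # GitHub answers 204 No Content on success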

View File

@@ -11,7 +11,6 @@ from comfy.comfy_types.node_typing import IO
 import folder_paths as comfy_paths
 import aiohttp
 import os
-import datetime
 import asyncio
 import io
 import logging
@@ -243,8 +242,8 @@ class Rodin3DAPI:
         return mesh_mode, quality_override
 
-    async def download_files(self, url_list):
-        save_path = os.path.join(comfy_paths.get_output_directory(), "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
+    async def download_files(self, url_list, task_uuid):
+        save_path = os.path.join(comfy_paths.get_output_directory(), f"Rodin3D_{task_uuid}")
         os.makedirs(save_path, exist_ok=True)
         model_file_path = None
         async with aiohttp.ClientSession() as session:
@@ -320,7 +319,7 @@ class Rodin3D_Regular(Rodin3DAPI):
             **kwargs)
         await self.poll_for_task_status(subscription_key, **kwargs)
         download_list = await self.get_rodin_download_list(task_uuid, **kwargs)
-        model = await self.download_files(download_list)
+        model = await self.download_files(download_list, task_uuid)
         return (model,)
@@ -366,7 +365,7 @@ class Rodin3D_Detail(Rodin3DAPI):
             **kwargs)
         await self.poll_for_task_status(subscription_key, **kwargs)
         download_list = await self.get_rodin_download_list(task_uuid, **kwargs)
-        model = await self.download_files(download_list)
+        model = await self.download_files(download_list, task_uuid)
         return (model,)
@@ -412,7 +411,7 @@ class Rodin3D_Smooth(Rodin3DAPI):
             **kwargs)
         await self.poll_for_task_status(subscription_key, **kwargs)
         download_list = await self.get_rodin_download_list(task_uuid, **kwargs)
-        model = await self.download_files(download_list)
+        model = await self.download_files(download_list, task_uuid)
         return (model,)
@@ -467,7 +466,7 @@ class Rodin3D_Sketch(Rodin3DAPI):
         )
         await self.poll_for_task_status(subscription_key, **kwargs)
         download_list = await self.get_rodin_download_list(task_uuid, **kwargs)
-        model = await self.download_files(download_list)
+        model = await self.download_files(download_list, task_uuid)
         return (model,)
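Reviewer note: the timestamped output directory is replaced by one keyed on the task UUID, which is why the `datetime` import could be dropped; downloads for the same task now land in a single stable folder instead of a new one per call. An illustrative before/after of the path construction (names are stand-ins):

import datetime, os

output_dir = "/path/to/output"   # stands in for comfy_paths.get_output_directory()
task_uuid = "TASK-UUID"          # placeholder for the Rodin task UUID

# before: a fresh directory per call, even when re-running the same task
old_path = os.path.join(output_dir, "Rodin3D", datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# after: one stable directory per task
new_path = os.path.join(output_dir, f"Rodin3D_{task_uuid}")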

View File

@@ -28,6 +28,12 @@ class Text2ImageInputField(BaseModel):
     negative_prompt: Optional[str] = Field(None)
 
 
+class Image2ImageInputField(BaseModel):
+    prompt: str = Field(...)
+    negative_prompt: Optional[str] = Field(None)
+    images: list[str] = Field(..., min_length=1, max_length=2)
+
+
 class Text2VideoInputField(BaseModel):
     prompt: str = Field(...)
     negative_prompt: Optional[str] = Field(None)
@@ -49,6 +55,13 @@ class Txt2ImageParametersField(BaseModel):
     watermark: bool = Field(True)
 
 
+class Image2ImageParametersField(BaseModel):
+    size: Optional[str] = Field(None)
+    n: int = Field(1, description="Number of images to generate.")  # we support only value=1
+    seed: int = Field(..., ge=0, le=2147483647)
+    watermark: bool = Field(True)
+
+
 class Text2VideoParametersField(BaseModel):
     size: str = Field(...)
     seed: int = Field(..., ge=0, le=2147483647)
@@ -73,6 +86,12 @@ class Text2ImageTaskCreationRequest(BaseModel):
     parameters: Txt2ImageParametersField = Field(...)
 
 
+class Image2ImageTaskCreationRequest(BaseModel):
+    model: str = Field(...)
+    input: Image2ImageInputField = Field(...)
+    parameters: Image2ImageParametersField = Field(...)
+
+
 class Text2VideoTaskCreationRequest(BaseModel):
     model: str = Field(...)
     input: Text2VideoInputField = Field(...)
@@ -135,7 +154,12 @@ async def process_task(
     url: str,
     request_model: Type[T],
     response_model: Type[R],
-    payload: Union[Text2ImageTaskCreationRequest, Text2VideoTaskCreationRequest, Image2VideoTaskCreationRequest],
+    payload: Union[
+        Text2ImageTaskCreationRequest,
+        Image2ImageTaskCreationRequest,
+        Text2VideoTaskCreationRequest,
+        Image2VideoTaskCreationRequest,
+    ],
     node_id: str,
     estimated_duration: int,
     poll_interval: int,
@@ -288,6 +312,128 @@ class WanTextToImageApi(comfy_io.ComfyNode):
         return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))
 
 
+class WanImageToImageApi(comfy_io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return comfy_io.Schema(
+            node_id="WanImageToImageApi",
+            display_name="Wan Image to Image",
+            category="api node/image/Wan",
+            description="Generates an image from one or two input images and a text prompt. "
+                        "The output image is currently fixed at 1.6 MP; its aspect ratio matches the input image(s).",
+            inputs=[
+                comfy_io.Combo.Input(
+                    "model",
+                    options=["wan2.5-i2i-preview"],
+                    default="wan2.5-i2i-preview",
+                    tooltip="Model to use.",
+                ),
+                comfy_io.Image.Input(
+                    "image",
+                    tooltip="Single-image editing or multi-image fusion, maximum 2 images.",
+                ),
+                comfy_io.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt used to describe the elements and visual features, supports English/Chinese.",
+                ),
+                comfy_io.String.Input(
+                    "negative_prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Negative text prompt to guide what to avoid.",
+                    optional=True,
+                ),
+                # redo this later as an optional combo of recommended resolutions
+                # comfy_io.Int.Input(
+                #     "width",
+                #     default=1280,
+                #     min=384,
+                #     max=1440,
+                #     step=16,
+                #     optional=True,
+                # ),
+                # comfy_io.Int.Input(
+                #     "height",
+                #     default=1280,
+                #     min=384,
+                #     max=1440,
+                #     step=16,
+                #     optional=True,
+                # ),
+                comfy_io.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=2147483647,
+                    step=1,
+                    display_mode=comfy_io.NumberDisplay.number,
+                    control_after_generate=True,
+                    tooltip="Seed to use for generation.",
+                    optional=True,
+                ),
+                comfy_io.Boolean.Input(
+                    "watermark",
+                    default=True,
+                    tooltip="Whether to add an \"AI generated\" watermark to the result.",
+                    optional=True,
+                ),
+            ],
+            outputs=[
+                comfy_io.Image.Output(),
+            ],
+            hidden=[
+                comfy_io.Hidden.auth_token_comfy_org,
+                comfy_io.Hidden.api_key_comfy_org,
+                comfy_io.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        model: str,
+        image: torch.Tensor,
+        prompt: str,
+        negative_prompt: str = "",
+        # width: int = 1024,
+        # height: int = 1024,
+        seed: int = 0,
+        watermark: bool = True,
+    ):
+        n_images = get_number_of_images(image)
+        if n_images not in (1, 2):
+            raise ValueError(f"Expected 1 or 2 input images, got {n_images}.")
+        images = []
+        for i in image:
+            images.append("data:image/png;base64," + tensor_to_base64_string(i, total_pixels=4096*4096))
+        payload = Image2ImageTaskCreationRequest(
+            model=model,
+            input=Image2ImageInputField(prompt=prompt, negative_prompt=negative_prompt, images=images),
+            parameters=Image2ImageParametersField(
+                # size=f"{width}*{height}",
+                seed=seed,
+                watermark=watermark,
+            ),
+        )
+        response = await process_task(
+            {
+                "auth_token": cls.hidden.auth_token_comfy_org,
+                "comfy_api_key": cls.hidden.api_key_comfy_org,
+            },
+            "/proxy/wan/api/v1/services/aigc/image2image/image-synthesis",
+            request_model=Image2ImageTaskCreationRequest,
+            response_model=ImageTaskStatusResponse,
+            payload=payload,
+            node_id=cls.hidden.unique_id,
+            estimated_duration=42,
+            poll_interval=3,
+        )
+        return comfy_io.NodeOutput(await download_url_to_image_tensor(str(response.output.results[0].url)))
+
+
 class WanTextToVideoApi(comfy_io.ComfyNode):
     @classmethod
     def define_schema(cls):
@@ -593,6 +739,7 @@ class WanApiExtension(ComfyExtension):
     async def get_node_list(self) -> list[type[comfy_io.ComfyNode]]:
         return [
             WanTextToImageApi,
+            WanImageToImageApi,
             WanTextToVideoApi,
             WanImageToVideoApi,
         ]
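Reviewer note: the new Pydantic models encode the API contract directly — the one-or-two-image limit lives in `images: list[str] = Field(..., min_length=1, max_length=2)` as well as in the node's runtime check, and each image is sent as a base64 data URI. A minimal sketch of building and validating such a payload, assuming Pydantic v2 semantics for `min_length` on list fields:

from pydantic import ValidationError

payload = Image2ImageTaskCreationRequest(
    model="wan2.5-i2i-preview",
    input=Image2ImageInputField(
        prompt="replace the sky with a nebula",
        images=["data:image/png;base64,iVBORw0KGgo..."],  # 1 or 2 data URIs
    ),
    parameters=Image2ImageParametersField(seed=42),
)

try:
    Image2ImageInputField(prompt="x", images=[])  # violates min_length=1
except ValidationError:
    print("rejected before any API call is made")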

View File

@@ -1,17 +1,29 @@
+from typing_extensions import override
+
 import torch
 import torch.nn.functional as F
 
-class Mahiro:
+from comfy_api.latest import ComfyExtension, io
+
+class Mahiro(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"model": ("MODEL",),
-                            }}
-    RETURN_TYPES = ("MODEL",)
-    RETURN_NAMES = ("patched_model",)
-    FUNCTION = "patch"
-    CATEGORY = "_for_testing"
-    DESCRIPTION = "Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt."
-    def patch(self, model):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Mahiro",
+            display_name="Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)",
+            category="_for_testing",
+            description="Modify the guidance to scale more on the 'direction' of the positive prompt rather than the difference between the negative prompt.",
+            inputs=[
+                io.Model.Input("model"),
+            ],
+            outputs=[
+                io.Model.Output(display_name="patched_model"),
+            ],
+            is_experimental=True,
+        )
+
+    @classmethod
+    def execute(cls, model) -> io.NodeOutput:
         m = model.clone()
         def mahiro_normd(args):
             scale: float = args['cond_scale']
@@ -30,12 +42,16 @@ class Mahiro:
             wm = (simsc*cfg + (4-simsc)*leap) / 4
             return wm
         m.set_model_sampler_post_cfg_function(mahiro_normd)
-        return (m, )
+        return io.NodeOutput(m)
 
-NODE_CLASS_MAPPINGS = {
-    "Mahiro": Mahiro
-}
 
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "Mahiro": "Mahiro is so cute that she deserves a better guidance function!! (。・ω・。)",
-}
+class MahiroExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Mahiro,
+        ]
+
+
+async def comfy_entrypoint() -> MahiroExtension:
+    return MahiroExtension()
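Reviewer note: this is the recurring shape of the V3 migration in this merge — `INPUT_TYPES`/`RETURN_TYPES`/`FUNCTION` collapse into a declarative `define_schema`, execution becomes a classmethod returning `io.NodeOutput`, and `NODE_CLASS_MAPPINGS` gives way to an extension with a `comfy_entrypoint`. A stripped-down sketch of the pattern (hypothetical node, using only API surface visible in this diff):

from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class ExampleNode(io.ComfyNode):  # hypothetical node, for illustration only
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleNode",
            category="_for_testing",
            inputs=[io.Model.Input("model")],
            outputs=[io.Model.Output()],
        )

    @classmethod
    def execute(cls, model) -> io.NodeOutput:
        return io.NodeOutput(model)  # pass-through; real nodes patch/transform here

class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [ExampleNode]

async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()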

View File

@@ -1,23 +1,40 @@
-import nodes
+from typing_extensions import override
+
 import torch
+
 import comfy.model_management
+import nodes
+from comfy_api.latest import ComfyExtension, io
 
-class EmptyMochiLatentVideo:
+
+class EmptyMochiLatentVideo(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "width": ("INT", {"default": 848, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                              "height": ("INT", {"default": 480, "min": 16, "max": nodes.MAX_RESOLUTION, "step": 16}),
-                              "length": ("INT", {"default": 25, "min": 7, "max": nodes.MAX_RESOLUTION, "step": 6}),
-                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
-    RETURN_TYPES = ("LATENT",)
-    FUNCTION = "generate"
-
-    CATEGORY = "latent/video"
-
-    def generate(self, width, height, length, batch_size=1):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="EmptyMochiLatentVideo",
+            category="latent/video",
+            inputs=[
+                io.Int.Input("width", default=848, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("height", default=480, min=16, max=nodes.MAX_RESOLUTION, step=16),
+                io.Int.Input("length", default=25, min=7, max=nodes.MAX_RESOLUTION, step=6),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+            ],
+            outputs=[
+                io.Latent.Output(),
+            ],
+        )
+
+    @classmethod
+    def execute(cls, width, height, length, batch_size=1) -> io.NodeOutput:
         latent = torch.zeros([batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8], device=comfy.model_management.intermediate_device())
-        return ({"samples":latent}, )
+        return io.NodeOutput({"samples": latent})
 
-NODE_CLASS_MAPPINGS = {
-    "EmptyMochiLatentVideo": EmptyMochiLatentVideo,
-}
+
+class MochiExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            EmptyMochiLatentVideo,
+        ]
+
+
+async def comfy_entrypoint() -> MochiExtension:
+    return MochiExtension()
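Reviewer note: the latent geometry is untouched by the migration — 12 channels, 6x temporal compression (plus one frame), 8x spatial compression, exactly as the `torch.zeros` call reads. For the default inputs the allocated shape works out as:

# Worked example: latent shape for the defaults (width=848, height=480, length=25)
batch_size, width, height, length = 1, 848, 480, 25
shape = [batch_size, 12, ((length - 1) // 6) + 1, height // 8, width // 8]
print(shape)  # [1, 12, 5, 60, 106]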

View File

@@ -5,6 +5,9 @@ import comfy.samplers
 import comfy.utils
 import node_helpers
 import math
+from typing_extensions import override
+
+from comfy_api.latest import ComfyExtension, io
 
 def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
     pos = noise_pred_pos - noise_pred_nocond
@@ -16,20 +19,27 @@ def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, co
     return cfg_result
 
 #TODO: This node should be removed, it has been replaced with PerpNegGuider
-class PerpNeg:
+class PerpNeg(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"model": ("MODEL", ),
-                             "empty_conditioning": ("CONDITIONING", ),
-                             "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
-                            }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="PerpNeg",
+            display_name="Perp-Neg (DEPRECATED by PerpNegGuider)",
+            category="_for_testing",
+            inputs=[
+                io.Model.Input("model"),
+                io.Conditioning.Input("empty_conditioning"),
+                io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+            is_experimental=True,
+            is_deprecated=True,
+        )
 
-    CATEGORY = "_for_testing"
-    DEPRECATED = True
-
-    def patch(self, model, empty_conditioning, neg_scale):
+    @classmethod
+    def execute(cls, model, empty_conditioning, neg_scale) -> io.NodeOutput:
         m = model.clone()
         nocond = comfy.sampler_helpers.convert_cond(empty_conditioning)
@@ -50,7 +60,7 @@ class PerpNeg:
 
         m.set_model_sampler_cfg_function(cfg_function)
 
-        return (m, )
+        return io.NodeOutput(m)
 
 
 class Guider_PerpNeg(comfy.samplers.CFGGuider):
@@ -112,35 +122,42 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
         return cfg_result
 
-class PerpNegGuider:
+
+class PerpNegGuider(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required":
-                    {"model": ("MODEL",),
-                     "positive": ("CONDITIONING", ),
-                     "negative": ("CONDITIONING", ),
-                     "empty_conditioning": ("CONDITIONING", ),
-                     "cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}),
-                     "neg_scale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
-                     }
-                }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="PerpNegGuider",
+            category="_for_testing",
+            inputs=[
+                io.Model.Input("model"),
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Conditioning.Input("empty_conditioning"),
+                io.Float.Input("cfg", default=8.0, min=0.0, max=100.0, step=0.1, round=0.01),
+                io.Float.Input("neg_scale", default=1.0, min=0.0, max=100.0, step=0.01),
+            ],
+            outputs=[
+                io.Guider.Output(),
+            ],
+            is_experimental=True,
+        )
 
-    RETURN_TYPES = ("GUIDER",)
-
-    FUNCTION = "get_guider"
-    CATEGORY = "_for_testing"
-
-    def get_guider(self, model, positive, negative, empty_conditioning, cfg, neg_scale):
+    @classmethod
+    def execute(cls, model, positive, negative, empty_conditioning, cfg, neg_scale) -> io.NodeOutput:
         guider = Guider_PerpNeg(model)
         guider.set_conds(positive, negative, empty_conditioning)
         guider.set_cfg(cfg, neg_scale)
-        return (guider,)
+        return io.NodeOutput(guider)
 
-NODE_CLASS_MAPPINGS = {
-    "PerpNeg": PerpNeg,
-    "PerpNegGuider": PerpNegGuider,
-}
 
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "PerpNeg": "Perp-Neg (DEPRECATED by PerpNegGuider)",
-}
+class PerpNegExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            PerpNeg,
+            PerpNegGuider,
+        ]
+
+
+async def comfy_entrypoint() -> PerpNegExtension:
+    return PerpNegExtension()
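Reviewer note: only the first line of `perp_neg` appears as context here. For orientation, a hedged sketch of the perpendicular-negative idea the function implements, following the Perp-Neg formulation rather than copied from this file:

import torch

def perp_neg_sketch(noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
    # direction the positive prompt pulls in, relative to the unconditional prediction
    pos = noise_pred_pos - noise_pred_nocond
    neg = noise_pred_neg - noise_pred_nocond
    # drop the component of `neg` parallel to `pos`, so the negative prompt can
    # only steer sideways and never directly cancel the positive guidance
    perp = neg - (torch.sum(neg * pos) / (torch.norm(pos) ** 2)) * pos
    return noise_pred_nocond + cond_scale * (pos - neg_scale * perp)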

View File

@@ -26,11 +26,12 @@ async def cache_control(
     """Cache control middleware that sets appropriate cache headers based on file type and response status"""
     response: web.Response = await handler(request)
 
-    if (
-        request.path.endswith(".js")
-        or request.path.endswith(".css")
-        or request.path.endswith("index.json")
-    ):
+    path_filename = request.path.rsplit("/", 1)[-1]
+    is_entry_point = path_filename.startswith("index") and path_filename.endswith(
+        ".json"
+    )
+
+    if request.path.endswith(".js") or request.path.endswith(".css") or is_entry_point:
         response.headers.setdefault("Cache-Control", "no-cache")
     return response
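Reviewer note: the rewrite both widens and narrows the match — localized entry points such as `index.zh.json` are now treated as no-cache, while unrelated files that merely end in `index.json` (e.g. `reindex.json`, which the old `endswith` check matched) no longer are. A standalone re-implementation of the rule for a quick check:

def is_entry_point(path: str) -> bool:
    name = path.rsplit("/", 1)[-1]
    return name.startswith("index") and name.endswith(".json")

assert is_entry_point("/templates/index.json")
assert is_entry_point("/templates/index.zh.json")     # newly covered localized variant
assert not is_entry_point("/templates/reindex.json")  # matched by the old endswith check
assert not is_entry_point("/static/index.js")         # still handled by the .js branch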

View File

@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.26.13
-comfyui-workflow-templates==0.1.88
+comfyui-workflow-templates==0.1.91
 comfyui-embedded-docs==0.2.6
 comfyui_manager==4.0.2
 torch

View File

@@ -48,6 +48,13 @@ CACHE_SCENARIOS = [
         "expected_cache": "no-cache",
         "should_have_header": True,
     },
+    {
+        "name": "localized_index_json_no_cache",
+        "path": "/templates/index.zh.json",
+        "status": 200,
+        "expected_cache": "no-cache",
+        "should_have_header": True,
+    },
     # Non-matching files
     {
         "name": "html_no_header",