Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-30 00:00:26 +08:00)

Compare commits: 21 commits, 2e461ed9cc ... 04533997fe

Commits in this range:
04533997fe, 9125613b53, 732b707397, 4c816d5c69, 6125b3a5e7, 12918a5f78, 8f40b43e02,
3b832231bb, f030d2c425, 019eaab4c9, f330220f66, d35e0fcdd7, 3d0331813d, afd4b725db,
149506beea, dfca61be7f, 39a5c5621e, 0d20e44618, 5f415089fc, 6d23bfde7f, 0eff10fd21
app/user_manager.py

@@ -377,8 +377,22 @@ class UserManager():
         try:
             body = await request.read()

-            with open(path, "wb") as f:
-                f.write(body)
+            # Pretty print JSON files for better source control
+            if path.lower().endswith('.json'):
+                try:
+                    # Parse JSON and re-serialize with indentation
+                    json_data = json.loads(body.decode('utf-8'))
+                    formatted_json = json.dumps(json_data, indent=2)
+                    with open(path, "w", encoding='utf-8') as f:
+                        f.write(formatted_json)
+                except (json.JSONDecodeError, UnicodeDecodeError):
+                    # If JSON parsing fails, save as-is
+                    with open(path, "wb") as f:
+                        f.write(body)
+            else:
+                # Non-JSON files are saved as-is
+                with open(path, "wb") as f:
+                    f.write(body)
         except OSError as e:
             logging.warning(f"Error saving file '{path}': {e}")
             return web.Response(
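A note on the behavior above: the round-trip is plain json.loads/json.dumps, so key order is preserved while whitespace is normalized, and anything that fails to parse falls back to a byte-for-byte write. A self-contained sketch of the same save logic (illustrative only; `path` and `body` stand in for the handler's variables):

```python
# Minimal sketch of the JSON pretty-print-on-save behavior added above.
import json

def save_userdata(path: str, body: bytes) -> None:
    if path.lower().endswith(".json"):
        try:
            # Re-serialize with 2-space indentation for friendlier diffs.
            formatted = json.dumps(json.loads(body.decode("utf-8")), indent=2)
            with open(path, "w", encoding="utf-8") as f:
                f.write(formatted)
            return
        except (json.JSONDecodeError, UnicodeDecodeError):
            pass  # fall through and save the raw bytes unchanged
    with open(path, "wb") as f:
        f.write(body)
```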
comfy/sd.py (15 lines changed)
@@ -1014,6 +1014,7 @@ class CLIPType(Enum):
     KANDINSKY5 = 22
     KANDINSKY5_IMAGE = 23
     NEWBIE = 24
+    FLUX2 = 25


 def load_clip(ckpt_paths, embedding_directory=None, clip_type=CLIPType.STABLE_DIFFUSION, model_options={}):
@@ -1046,6 +1047,7 @@ class TEModel(Enum):
     QWEN3_2B = 17
     GEMMA_3_12B = 18
     JINA_CLIP_2 = 19
+    QWEN3_8B = 20


 def detect_te_model(sd):
@@ -1089,6 +1091,8 @@ def detect_te_model(sd):
         return TEModel.QWEN3_4B
     elif weight.shape[0] == 2048:
         return TEModel.QWEN3_2B
+    elif weight.shape[0] == 4096:
+        return TEModel.QWEN3_8B
     if weight.shape[0] == 5120:
         if "model.layers.39.post_attention_layernorm.weight" in sd:
             return TEModel.MISTRAL3_24B
@@ -1214,11 +1218,18 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
             clip_target.tokenizer = comfy.text_encoders.flux.Flux2Tokenizer
             tokenizer_data["tekken_model"] = clip_data[0].get("tekken_model", None)
         elif te_model == TEModel.QWEN3_4B:
-            clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
-            clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
+            if clip_type == CLIPType.FLUX or clip_type == CLIPType.FLUX2:
+                clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_4b")
+                clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer
+            else:
+                clip_target.clip = comfy.text_encoders.z_image.te(**llama_detect(clip_data))
+                clip_target.tokenizer = comfy.text_encoders.z_image.ZImageTokenizer
         elif te_model == TEModel.QWEN3_2B:
             clip_target.clip = comfy.text_encoders.ovis.te(**llama_detect(clip_data))
             clip_target.tokenizer = comfy.text_encoders.ovis.OvisTokenizer
+        elif te_model == TEModel.QWEN3_8B:
+            clip_target.clip = comfy.text_encoders.flux.klein_te(**llama_detect(clip_data), model_type="qwen3_8b")
+            clip_target.tokenizer = comfy.text_encoders.flux.KleinTokenizer8B
         elif te_model == TEModel.JINA_CLIP_2:
             clip_target.clip = comfy.text_encoders.jina_clip_2.JinaClip2TextModelWrapper
             clip_target.tokenizer = comfy.text_encoders.jina_clip_2.JinaClip2TokenizerWrapper
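Context for the detection change: detect_te_model distinguishes the Qwen3 text-encoder variants purely by the hidden size of a known weight in the state dict (2560 for 4B per the tokenizer's embedding_size below, 2048 for 2B, and now 4096 for 8B). A self-contained sketch of the same idea, with a hypothetical weight key:

```python
# Illustrative sketch of shape-based text-encoder detection (not comfy/sd.py
# itself; the weight key below is hypothetical).
import torch

def guess_qwen3_variant(sd: dict) -> str:
    hidden = sd["model.embed_tokens.weight"].shape[1]  # [vocab, hidden]
    return {2048: "qwen3_2b", 2560: "qwen3_4b", 4096: "qwen3_8b"}[hidden]

sd = {"model.embed_tokens.weight": torch.zeros(151936, 4096)}
print(guess_qwen3_variant(sd))  # qwen3_8b
```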
comfy/supported_models.py

@@ -763,7 +763,7 @@ class Flux2(Flux):

     def __init__(self, unet_config):
         super().__init__(unet_config)
-        self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * 2.36
+        self.memory_usage_factor = self.memory_usage_factor * (2.0 * 2.0) * (unet_config['hidden_size'] / 2604)

     def get_model(self, state_dict, prefix="", device=None):
         out = model_base.Flux2(self, device=device)
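Since 6144 / 2604 ≈ 2.36, the old constant appears to have corresponded to a hidden size of 6144; the new expression reproduces that value for such a model and scales the memory estimate down for narrower Flux2 checkpoints. A quick arithmetic check (the multipliers come from the diff; 6144 is inferred, not stated):

```python
# Arithmetic check of the memory_usage_factor change above.
old_mult = (2.0 * 2.0) * 2.36                    # fixed multiplier: 9.44
new_mult = lambda hidden: (2.0 * 2.0) * (hidden / 2604)
print(old_mult, new_mult(6144), new_mult(2604))  # 9.44, ~9.44, 4.0
```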
comfy/text_encoders/flux.py

@@ -3,7 +3,7 @@ import comfy.text_encoders.t5
 import comfy.text_encoders.sd3_clip
 import comfy.text_encoders.llama
 import comfy.model_management
-from transformers import T5TokenizerFast, LlamaTokenizerFast
+from transformers import T5TokenizerFast, LlamaTokenizerFast, Qwen2Tokenizer
 import torch
 import os
 import json
@@ -172,3 +172,60 @@ def flux2_te(dtype_llama=None, llama_quantization_metadata=None, pruned=False):
             model_options["num_layers"] = 30
             super().__init__(device=device, dtype=dtype, model_options=model_options)
     return Flux2TEModel_
+
+class Qwen3Tokenizer(sd1_clip.SDTokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=2560, embedding_key='qwen3_4b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class Qwen3Tokenizer8B(sd1_clip.SDTokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}):
+        tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "qwen25_tokenizer")
+        super().__init__(tokenizer_path, pad_with_end=False, embedding_size=4096, embedding_key='qwen3_8b', tokenizer_class=Qwen2Tokenizer, has_start_token=False, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=512, pad_token=151643, tokenizer_data=tokenizer_data)
+
+class KleinTokenizer(sd1_clip.SD1Tokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_4b"):
+        if name == "qwen3_4b":
+            tokenizer = Qwen3Tokenizer
+        elif name == "qwen3_8b":
+            tokenizer = Qwen3Tokenizer8B
+
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name, tokenizer=tokenizer)
+        self.llama_template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
+
+    def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, **kwargs):
+        if llama_template is None:
+            llama_text = self.llama_template.format(text)
+        else:
+            llama_text = llama_template.format(text)
+
+        tokens = super().tokenize_with_weights(llama_text, return_word_ids=return_word_ids, disable_weights=True, **kwargs)
+        return tokens
+
+class KleinTokenizer8B(KleinTokenizer):
+    def __init__(self, embedding_directory=None, tokenizer_data={}, name="qwen3_8b"):
+        super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name=name)
+
+class Qwen3_4BModel(sd1_clip.SDClipModel):
+    def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_4B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+class Qwen3_8BModel(sd1_clip.SDClipModel):
+    def __init__(self, device="cpu", layer=[9, 18, 27], layer_idx=None, dtype=None, attention_mask=True, model_options={}):
+        super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"pad": 151643}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Qwen3_8B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
+
+def klein_te(dtype_llama=None, llama_quantization_metadata=None, model_type="qwen3_4b"):
+    if model_type == "qwen3_4b":
+        model = Qwen3_4BModel
+    elif model_type == "qwen3_8b":
+        model = Qwen3_8BModel
+
+    class Flux2TEModel_(Flux2TEModel):
+        def __init__(self, device="cpu", dtype=None, model_options={}):
+            if llama_quantization_metadata is not None:
+                model_options = model_options.copy()
+                model_options["quantization_metadata"] = llama_quantization_metadata
+            if dtype_llama is not None:
+                dtype = dtype_llama
+            super().__init__(device=device, dtype=dtype, name=model_type, model_options=model_options, clip_model=model)
+    return Flux2TEModel_
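The llama_template above wraps the prompt in a Qwen3 chat turn with an empty <think> block, so the encoder sees a fully formed conversation context rather than bare text. What the formatted string looks like for a simple prompt:

```python
# What KleinTokenizer.tokenize_with_weights feeds the tokenizer for a prompt.
template = "<|im_start|>user\n{}<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
print(template.format("a photo of a cat"))
# <|im_start|>user
# a photo of a cat
# <|im_end|>
# <|im_start|>assistant
# <think>
#
# </think>
#
```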
comfy/text_encoders/llama.py

@@ -99,6 +99,28 @@ class Qwen3_4BConfig:
     rope_scale = None
     final_norm: bool = True

+@dataclass
+class Qwen3_8BConfig:
+    vocab_size: int = 151936
+    hidden_size: int = 4096
+    intermediate_size: int = 12288
+    num_hidden_layers: int = 36
+    num_attention_heads: int = 32
+    num_key_value_heads: int = 8
+    max_position_embeddings: int = 40960
+    rms_norm_eps: float = 1e-6
+    rope_theta: float = 1000000.0
+    transformer_type: str = "llama"
+    head_dim = 128
+    rms_norm_add = False
+    mlp_activation = "silu"
+    qkv_bias = False
+    rope_dims = None
+    q_norm = "gemma3"
+    k_norm = "gemma3"
+    rope_scale = None
+    final_norm: bool = True
+
 @dataclass
 class Ovis25_2BConfig:
     vocab_size: int = 151936
@@ -628,6 +650,15 @@ class Qwen3_4B(BaseLlama, torch.nn.Module):
         self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
         self.dtype = dtype

+class Qwen3_8B(BaseLlama, torch.nn.Module):
+    def __init__(self, config_dict, dtype, device, operations):
+        super().__init__()
+        config = Qwen3_8BConfig(**config_dict)
+        self.num_layers = config.num_hidden_layers
+
+        self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
+        self.dtype = dtype
+
 class Ovis25_2B(BaseLlama, torch.nn.Module):
     def __init__(self, config_dict, dtype, device, operations):
         super().__init__()
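Per the config above, Qwen3_8B uses grouped-query attention: 32 query heads share 8 KV heads (4 queries per KV head) with head_dim 128. A quick check of the projection shapes those numbers imply:

```python
# Attention projection shapes implied by Qwen3_8BConfig (arithmetic only).
hidden, n_heads, n_kv_heads, head_dim = 4096, 32, 8, 128
print("q_proj:", (hidden, n_heads * head_dim))        # (4096, 4096)
print("k/v_proj:", (hidden, n_kv_heads * head_dim))   # (4096, 1024) each
print("queries per KV head:", n_heads // n_kv_heads)  # 4
```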
@@ -65,11 +65,13 @@ class TaskImageContent(BaseModel):
 class Text2VideoTaskCreationRequest(BaseModel):
     model: str = Field(...)
     content: list[TaskTextContent] = Field(..., min_length=1)
+    generate_audio: bool | None = Field(...)


 class Image2VideoTaskCreationRequest(BaseModel):
     model: str = Field(...)
     content: list[TaskTextContent | TaskImageContent] = Field(..., min_length=2)
+    generate_audio: bool | None = Field(...)


 class TaskCreationResponse(BaseModel):
@@ -141,4 +143,9 @@ VIDEO_TASKS_EXECUTION_TIME = {
         "720p": 65,
         "1080p": 100,
     },
+    "seedance-1-5-pro-251215": {
+        "480p": 80,
+        "720p": 100,
+        "1080p": 150,
+    },
 }
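The new generate_audio field is typed bool | None, so callers must pass it explicitly; the node code below sends None for models that do not support audio. A minimal sketch of the request shape (pydantic v2; only the fields shown in the diff are real, the TaskTextContent stand-in is simplified for this example):

```python
# Sketch of the updated request model; TaskTextContent is a simplified stand-in.
from pydantic import BaseModel, Field

class TaskTextContent(BaseModel):
    text: str

class Text2VideoTaskCreationRequest(BaseModel):
    model: str = Field(...)
    content: list[TaskTextContent] = Field(..., min_length=1)
    generate_audio: bool | None = Field(...)

req = Text2VideoTaskCreationRequest(
    model="seedance-1-5-pro-251215",
    content=[TaskTextContent(text="a drone shot over a coastline")],
    generate_audio=True,  # ignored by every model except seedance-1-5-pro
)
print(req.model_dump_json(exclude_none=True))
```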
@@ -477,7 +477,12 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"],
+                    options=[
+                        "seedance-1-5-pro-251215",
+                        "seedance-1-0-pro-250528",
+                        "seedance-1-0-lite-t2v-250428",
+                        "seedance-1-0-pro-fast-251015",
+                    ],
                     default="seedance-1-0-pro-fast-251015",
                 ),
                 IO.String.Input(
@@ -528,6 +533,12 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
+                IO.Boolean.Input(
+                    "generate_audio",
+                    default=False,
+                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
+                    optional=True,
+                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -552,7 +563,10 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
+        generate_audio: bool = False,
     ) -> IO.NodeOutput:
+        if model == "seedance-1-5-pro-251215" and duration < 4:
+            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
@@ -567,7 +581,11 @@ class ByteDanceTextToVideoNode(IO.ComfyNode):
         )
         return await process_video_task(
             cls,
-            payload=Text2VideoTaskCreationRequest(model=model, content=[TaskTextContent(text=prompt)]),
+            payload=Text2VideoTaskCreationRequest(
+                model=model,
+                content=[TaskTextContent(text=prompt)],
+                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
+            ),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
@@ -584,7 +602,12 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-t2v-250428", "seedance-1-0-pro-fast-251015"],
+                    options=[
+                        "seedance-1-5-pro-251215",
+                        "seedance-1-0-pro-250528",
+                        "seedance-1-0-lite-i2v-250428",
+                        "seedance-1-0-pro-fast-251015",
+                    ],
                     default="seedance-1-0-pro-fast-251015",
                 ),
                 IO.String.Input(
@@ -639,6 +662,12 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
+                IO.Boolean.Input(
+                    "generate_audio",
+                    default=False,
+                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
+                    optional=True,
+                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -664,7 +693,10 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
+        generate_audio: bool = False,
     ) -> IO.NodeOutput:
+        if model == "seedance-1-5-pro-251215" and duration < 4:
+            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         validate_image_dimensions(image, min_width=300, min_height=300, max_width=6000, max_height=6000)
@@ -686,6 +718,7 @@ class ByteDanceImageToVideoNode(IO.ComfyNode):
             payload=Image2VideoTaskCreationRequest(
                 model=model,
                 content=[TaskTextContent(text=prompt), TaskImageContent(image_url=TaskImageContentUrl(url=image_url))],
+                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
             ),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
@@ -703,7 +736,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
             inputs=[
                 IO.Combo.Input(
                     "model",
-                    options=["seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"],
+                    options=["seedance-1-5-pro-251215", "seedance-1-0-pro-250528", "seedance-1-0-lite-i2v-250428"],
                     default="seedance-1-0-lite-i2v-250428",
                 ),
                 IO.String.Input(
@@ -762,6 +795,12 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                     tooltip='Whether to add an "AI generated" watermark to the video.',
                     optional=True,
                 ),
+                IO.Boolean.Input(
+                    "generate_audio",
+                    default=False,
+                    tooltip="This parameter is ignored for any model except seedance-1-5-pro.",
+                    optional=True,
+                ),
             ],
             outputs=[
                 IO.Video.Output(),
@@ -788,7 +827,10 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
         seed: int,
         camera_fixed: bool,
         watermark: bool,
+        generate_audio: bool = False,
     ) -> IO.NodeOutput:
+        if model == "seedance-1-5-pro-251215" and duration < 4:
+            raise ValueError("Minimum supported duration for Seedance 1.5 Pro is 4 seconds.")
         validate_string(prompt, strip_whitespace=True, min_length=1)
         raise_if_text_params(prompt, ["resolution", "ratio", "duration", "seed", "camerafixed", "watermark"])
         for i in (first_frame, last_frame):
@@ -821,6 +863,7 @@ class ByteDanceFirstLastFrameNode(IO.ComfyNode):
                     TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[0])), role="first_frame"),
                     TaskImageContent(image_url=TaskImageContentUrl(url=str(download_urls[1])), role="last_frame"),
                 ],
+                generate_audio=generate_audio if model == "seedance-1-5-pro-251215" else None,
             ),
             estimated_duration=max(1, math.ceil(VIDEO_TASKS_EXECUTION_TIME[model][resolution] * (duration / 10.0))),
         )
@@ -896,7 +939,41 @@ class ByteDanceImageReferenceNode(IO.ComfyNode):
                 IO.Hidden.unique_id,
             ],
             is_api_node=True,
-            price_badge=PRICE_BADGE_VIDEO,
+            price_badge=IO.PriceBadge(
+                depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution"]),
+                expr="""
+                (
+                    $priceByModel := {
+                        "seedance-1-0-pro": {
+                            "480p":[0.23,0.24],
+                            "720p":[0.51,0.56]
+                        },
+                        "seedance-1-0-lite": {
+                            "480p":[0.17,0.18],
+                            "720p":[0.37,0.41]
+                        }
+                    };
+                    $model := widgets.model;
+                    $modelKey :=
+                        $contains($model, "seedance-1-0-pro") ? "seedance-1-0-pro" :
+                        "seedance-1-0-lite";
+                    $resolution := widgets.resolution;
+                    $resKey :=
+                        $contains($resolution, "720") ? "720p" :
+                        "480p";
+                    $modelPrices := $lookup($priceByModel, $modelKey);
+                    $baseRange := $lookup($modelPrices, $resKey);
+                    $min10s := $baseRange[0];
+                    $max10s := $baseRange[1];
+                    $scale := widgets.duration / 10;
+                    $minCost := $min10s * $scale;
+                    $maxCost := $max10s * $scale;
+                    ($minCost = $maxCost)
+                        ? {"type":"usd","usd": $minCost}
+                        : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost}
+                )
+                """,
+            ),
         )

     @classmethod
@@ -967,10 +1044,15 @@ def raise_if_text_params(prompt: str, text_params: list[str]) -> None:


 PRICE_BADGE_VIDEO = IO.PriceBadge(
-    depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution"]),
+    depends_on=IO.PriceBadgeDepends(widgets=["model", "duration", "resolution", "generate_audio"]),
     expr="""
     (
         $priceByModel := {
+            "seedance-1-5-pro": {
+                "480p":[0.12,0.12],
+                "720p":[0.26,0.26],
+                "1080p":[0.58,0.59]
+            },
             "seedance-1-0-pro": {
                 "480p":[0.23,0.24],
                 "720p":[0.51,0.56],
@@ -989,6 +1071,7 @@ PRICE_BADGE_VIDEO = IO.PriceBadge(
         };
         $model := widgets.model;
         $modelKey :=
+            $contains($model, "seedance-1-5-pro") ? "seedance-1-5-pro" :
             $contains($model, "seedance-1-0-pro-fast") ? "seedance-1-0-pro-fast" :
             $contains($model, "seedance-1-0-pro") ? "seedance-1-0-pro" :
             "seedance-1-0-lite";
@@ -1002,11 +1085,12 @@ PRICE_BADGE_VIDEO = IO.PriceBadge(
         $min10s := $baseRange[0];
         $max10s := $baseRange[1];
         $scale := widgets.duration / 10;
-        $minCost := $min10s * $scale;
-        $maxCost := $max10s * $scale;
+        $audioMultiplier := ($modelKey = "seedance-1-5-pro" and widgets.generate_audio) ? 2 : 1;
+        $minCost := $min10s * $scale * $audioMultiplier;
+        $maxCost := $max10s * $scale * $audioMultiplier;
         ($minCost = $maxCost)
-            ? {"type":"usd","usd": $minCost}
-            : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost}
+            ? {"type":"usd","usd": $minCost, "format": { "approximate": true }}
+            : {"type":"range_usd","min_usd": $minCost, "max_usd": $maxCost, "format": { "approximate": true }}
     )
     """,
 )
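The badge expression is JSONata evaluated by the frontend, but the math is simple enough to mirror in a few lines. A Python sketch of the same pricing rule (numbers copied from the expression above; the 2x multiplier applies only when seedance-1-5-pro generates audio):

```python
# Python mirror of the PRICE_BADGE_VIDEO math above (illustrative, not ComfyUI code).
PRICES_10S = {"seedance-1-5-pro": {"480p": (0.12, 0.12), "720p": (0.26, 0.26), "1080p": (0.58, 0.59)}}

def badge(model_key: str, resolution: str, duration: float, generate_audio: bool) -> dict:
    lo, hi = PRICES_10S[model_key][resolution]
    mult = 2 if (model_key == "seedance-1-5-pro" and generate_audio) else 1
    scale = duration / 10 * mult
    lo, hi = lo * scale, hi * scale
    if lo == hi:
        return {"type": "usd", "usd": lo, "format": {"approximate": True}}
    return {"type": "range_usd", "min_usd": lo, "max_usd": hi, "format": {"approximate": True}}

print(badge("seedance-1-5-pro", "720p", 5, True))  # {'type': 'usd', 'usd': 0.26, ...}
```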
comfyui_version.py

@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.9.1"
+__version__ = "0.9.2"
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.9.1"
+version = "0.9.2"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.10"
requirements.txt

@@ -1,5 +1,5 @@
 comfyui-frontend-package==1.36.14
-comfyui-workflow-templates==0.8.4
+comfyui-workflow-templates==0.8.10
 comfyui-embedded-docs==0.4.0
 torch
 torchsde
server.py (13 lines changed)
@@ -521,7 +521,7 @@ class PromptServer():
             buffer.seek(0)

             return web.Response(body=buffer.read(), content_type=f'image/{image_format}',
-                                headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})

         if 'channel' not in request.rel_url.query:
             channel = 'rgba'
@@ -541,7 +541,7 @@ class PromptServer():
             buffer.seek(0)

             return web.Response(body=buffer.read(), content_type='image/png',
-                                headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})

         elif channel == 'a':
             with Image.open(file) as img:
@@ -558,7 +558,7 @@ class PromptServer():
             alpha_buffer.seek(0)

             return web.Response(body=alpha_buffer.read(), content_type='image/png',
-                                headers={"Content-Disposition": f"filename=\"{filename}\""})
+                                headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
         else:
             # Get content type from mimetype, defaulting to 'application/octet-stream'
             content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
@@ -570,7 +570,7 @@ class PromptServer():
             return web.FileResponse(
                 file,
                 headers={
-                    "Content-Disposition": f"filename=\"{filename}\"",
+                    "Content-Disposition": f"attachment; filename=\"{filename}\"",
                     "Content-Type": content_type
                 }
             )
@@ -686,7 +686,10 @@ class PromptServer():

     @routes.get("/object_info")
     async def get_object_info(request):
-        seed_assets(["models"])
+        try:
+            seed_assets(["models"])
+        except Exception as e:
+            logging.error(f"Failed to seed assets: {e}")
         with folder_paths.cache_helper:
             out = {}
             for x in nodes.NODE_CLASS_MAPPINGS:
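Context for the Content-Disposition change: RFC 6266 requires the header to begin with a disposition type ("inline" or "attachment"); a bare filename parameter with no type is malformed and handled inconsistently by browsers. A minimal aiohttp sketch of the corrected form (hypothetical handler, not server.py itself):

```python
# Minimal aiohttp handler showing the corrected header form (illustrative only).
from aiohttp import web

async def download(request: web.Request) -> web.Response:
    data = b"example bytes"
    return web.Response(
        body=data,
        content_type="application/octet-stream",
        # "attachment; filename=..." is the RFC 6266 form; a bare filename=
        # parameter with no disposition type is malformed.
        headers={"Content-Disposition": 'attachment; filename="example.bin"'},
    )

app = web.Application()
app.add_routes([web.get("/download", download)])
```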
@@ -287,3 +287,72 @@ async def test_listuserdata_v2_url_encoded_path(aiohttp_client, app, tmp_path):
     assert entry["name"] == "file.txt"
     # Ensure the path is correctly decoded and uses forward slash
     assert entry["path"] == "my dir/file.txt"
+
+
+async def test_post_userdata_json_pretty_print(aiohttp_client, app, tmp_path):
+    """Test that JSON files are saved with pretty printing (indentation)"""
+    import json
+
+    client = await aiohttp_client(app)
+
+    # Create a compact JSON workflow
+    workflow_data = {
+        "nodes": [
+            {"id": "1", "type": "LoadImage", "inputs": {"image": "test.png"}},
+            {"id": "2", "type": "SaveImage", "inputs": {"images": ["1", 0]}}
+        ],
+        "metadata": {"version": "1.0", "author": "test"}
+    }
+    compact_json = json.dumps(workflow_data).encode('utf-8')
+
+    # Save as JSON file
+    resp = await client.post("/userdata/workflow.json", data=compact_json)
+    assert resp.status == 200
+
+    # Read the saved file and verify it's pretty-printed
+    with open(tmp_path / "workflow.json", "r", encoding='utf-8') as f:
+        saved_content = f.read()
+
+    # Verify the file contains indentation (pretty-printed)
+    assert "  " in saved_content  # Should have 2-space indentation
+    assert "\n" in saved_content  # Should have newlines
+
+    # Verify the content is still valid JSON and matches original data
+    saved_data = json.loads(saved_content)
+    assert saved_data == workflow_data
+
+    # Verify it's actually formatted (not compact)
+    # Compact JSON would be much shorter
+    assert len(saved_content) > len(compact_json)
+
+
+async def test_post_userdata_json_invalid_fallback(aiohttp_client, app, tmp_path):
+    """Test that invalid JSON is saved as-is without error"""
+    client = await aiohttp_client(app)
+
+    # Create invalid JSON content
+    invalid_json = b'{"invalid": json content}'
+
+    # Save as JSON file - should not fail
+    resp = await client.post("/userdata/invalid.json", data=invalid_json)
+    assert resp.status == 200
+
+    # Verify file was saved as-is
+    with open(tmp_path / "invalid.json", "rb") as f:
+        assert f.read() == invalid_json
+
+
+async def test_post_userdata_non_json_unchanged(aiohttp_client, app, tmp_path):
+    """Test that non-JSON files are saved unchanged"""
+    client = await aiohttp_client(app)
+
+    # Create binary content
+    binary_content = b'\x00\x01\x02\x03\x04\x05'
+
+    # Save as non-JSON file
+    resp = await client.post("/userdata/test.bin", data=binary_content)
+    assert resp.status == 200
+
+    # Verify file was saved exactly as-is
+    with open(tmp_path / "test.bin", "rb") as f:
+        assert f.read() == binary_content