Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-01 01:30:51 +08:00)

Commit 414b1b4c8c: Merge branch 'master' into v3-improvements
@@ -491,7 +491,8 @@ class NextDiT(nn.Module):
                 for layer_id in range(n_layers)
             ]
         )
-        self.norm_final = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype"))
+        # This norm final is in the lumina 2.0 code but isn't actually used for anything.
+        # self.norm_final = operation_settings.get("operations").RMSNorm(dim, eps=norm_eps, elementwise_affine=True, device=operation_settings.get("device"), dtype=operation_settings.get("dtype"))
         self.final_layer = FinalLayer(dim, patch_size, self.out_channels, z_image_modulation=z_image_modulation, operation_settings=operation_settings)
 
         if self.pad_tokens_multiple is not None:
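Commenting out the registration, rather than just skipping the call in forward(), matters because any module assigned in __init__ still allocates its parameters and shows up in the state dict. A minimal sketch of that effect, unrelated to the actual NextDiT code (the Toy class and sizes are invented; nn.RMSNorm needs PyTorch >= 2.4):

import torch.nn as nn

class Toy(nn.Module):
    def __init__(self, dim=8, keep_unused=True):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
        if keep_unused:
            # registered but never called in forward(), still allocates a weight
            self.norm_final = nn.RMSNorm(dim)

    def forward(self, x):
        return self.proj(x)

print(sum(p.numel() for p in Toy(keep_unused=True).parameters()))   # 80
print(sum(p.numel() for p in Toy(keep_unused=False).parameters()))  # 72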
@@ -1019,8 +1019,8 @@ NUM_STREAMS = 0
 if args.async_offload is not None:
     NUM_STREAMS = args.async_offload
 else:
-    # Enable by default on Nvidia
-    if is_nvidia():
+    # Enable by default on Nvidia and AMD
+    if is_nvidia() or is_amd():
         NUM_STREAMS = 2
 
 if args.disable_async_offload:
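With this hunk, AMD gets the same default as Nvidia: two offload streams. The point of NUM_STREAMS > 0 is to issue weight copies on side streams so they can overlap with compute on the default stream, which only works well when the host buffers are pinned. A rough, self-contained sketch of that idea (not ComfyUI's offload code; names and sizes are invented):

import torch

NUM_STREAMS = 2
if torch.cuda.is_available():
    streams = [torch.cuda.Stream() for _ in range(NUM_STREAMS)]
    cpu_weights = [torch.empty(1024, 1024, pin_memory=True) for _ in range(4)]
    gpu_weights = []
    for i, w in enumerate(cpu_weights):
        s = streams[i % NUM_STREAMS]
        with torch.cuda.stream(s):
            # non_blocking copy from pinned memory runs asynchronously on the side stream
            gpu_weights.append(w.to("cuda", non_blocking=True))
    for s in streams:
        # make the default stream wait before any kernel consumes the weights
        torch.cuda.current_stream().wait_stream(s)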
@@ -1126,6 +1126,16 @@ if not args.disable_pinned_memory:
 
 PINNING_ALLOWED_TYPES = set(["Parameter", "QuantizedTensor"])
 
+def discard_cuda_async_error():
+    try:
+        a = torch.tensor([1], dtype=torch.uint8, device=get_torch_device())
+        b = torch.tensor([1], dtype=torch.uint8, device=get_torch_device())
+        _ = a + b
+        torch.cuda.synchronize()
+    except torch.AcceleratorError:
+        #Dump it! We already know about it from the synchronous return
+        pass
+
 def pin_memory(tensor):
     global TOTAL_PINNED_MEMORY
     if MAX_PINNED_MEMORY <= 0:
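CUDA work is queued asynchronously, so an error that was already reported by a failing synchronous call can surface again later, attached to whatever operation happens to synchronize next. The new helper launches a throwaway add and synchronizes so the queued duplicate raises (as torch.AcceleratorError) inside the helper, where it is deliberately swallowed. A condensed sketch of the call pattern the later hunks use (host_register_ok is a made-up stand-in for the real success check, and discard_cuda_async_error is the helper added above):

import logging

host_register_ok = False  # pretend the page-lock/registration call just failed
if not host_register_ok:
    logging.warning("Pin error.")
    discard_cuda_async_error()  # drain the queued async duplicate of the error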
@@ -1158,6 +1168,9 @@ def pin_memory(tensor):
         PINNED_MEMORY[ptr] = size
         TOTAL_PINNED_MEMORY += size
         return True
+    else:
+        logging.warning("Pin error.")
+        discard_cuda_async_error()
 
     return False
 
@@ -1186,6 +1199,9 @@ def unpin_memory(tensor):
         if len(PINNED_MEMORY) == 0:
             TOTAL_PINNED_MEMORY = 0
         return True
+    else:
+        logging.warning("Unpin error.")
+        discard_cuda_async_error()
 
     return False
 
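Both hunks treat a failed pin or unpin as a soft failure: log a warning, flush the queued async error, and return False so the caller falls back to ordinary pageable memory. That is safe because page-locked memory is purely an optimization; a non_blocking host-to-device copy from pageable memory degrades to a synchronous one rather than breaking. A minimal illustration of the fallback (this uses Tensor.pin_memory(), which copies, rather than the in-place registration the diff's budget tracking implies):

import torch

t = torch.empty(64, 64)
try:
    t = t.pin_memory()   # may fail, e.g. when page-locked memory is exhausted
    pinned = True
except RuntimeError:
    pinned = False       # keep the pageable tensor and carry on
if torch.cuda.is_available():
    # overlap with compute is only possible when the source is pinned
    g = t.to("cuda", non_blocking=pinned)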
@@ -168,6 +168,8 @@ class VeoVideoGenerationNode(IO.ComfyNode):
         # Only add generateAudio for Veo 3 models
         if model.find("veo-2.0") == -1:
             parameters["generateAudio"] = generate_audio
+            # force "enhance_prompt" to True for Veo3 models
+            parameters["enhancePrompt"] = True
 
         initial_response = await sync_op(
             cls,
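The effect of the two added lines is that every non-Veo-2.0 request now carries both flags, with prompt enhancement forced on regardless of the node input. A small trace of the resulting parameters dict (the model id below is a placeholder):

model = "veo-3.0-example"   # any id without "veo-2.0" takes this branch
generate_audio = False
parameters = {}
if model.find("veo-2.0") == -1:
    parameters["generateAudio"] = generate_audio
    parameters["enhancePrompt"] = True
print(parameters)   # {'generateAudio': False, 'enhancePrompt': True}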
@@ -291,7 +293,7 @@ class Veo3VideoGenerationNode(VeoVideoGenerationNode):
             IO.Boolean.Input(
                 "enhance_prompt",
                 default=True,
-                tooltip="Whether to enhance the prompt with AI assistance",
+                tooltip="This parameter is deprecated and ignored.",
                 optional=True,
             ),
             IO.Combo.Input(
@@ -430,9 +430,9 @@ def _display_text(
     if status:
         display_lines.append(f"Status: {status.capitalize() if isinstance(status, str) else status}")
     if price is not None:
-        p = f"{float(price):,.4f}".rstrip("0").rstrip(".")
+        p = f"{float(price) * 211:,.1f}".rstrip("0").rstrip(".")
         if p != "0":
-            display_lines.append(f"Price: ${p}")
+            display_lines.append(f"Price: {p} credits")
     if text is not None:
         display_lines.append(text)
     if display_lines:
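This hunk switches the price readout from dollars to credits using the 211 multiplier from the diff and drops to one decimal place. A worked example of the formatting chain (the 1.0 price is arbitrary):

price = 1.0
p = f"{float(price) * 211:,.1f}".rstrip("0").rstrip(".")   # "211.0" -> "211." -> "211"
print(f"Price: {p} credits")   # Price: 211 credits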