Mirror of https://github.com/comfyanonymous/ComfyUI.git
Merge branch 'master' into dr-support-pip-cm
Commit: d6fa7a7c84
comfy/k_diffusion/sampling.py

@@ -1447,14 +1447,15 @@ def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None,
         old_d = d
     return x


 @torch.no_grad()
 def sample_gradient_estimation_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
     return sample_gradient_estimation(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, ge_gamma=ge_gamma, cfg_pp=True)


 @torch.no_grad()
-def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None, noise_scaler=None, max_stage=3):
-    """
-    Extended Reverse-Time SDE solver (VE ER-SDE-Solver-3). Arxiv: https://arxiv.org/abs/2309.06169.
+def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.0, noise_sampler=None, noise_scaler=None, max_stage=3):
+    """Extended Reverse-Time SDE solver (VP ER-SDE-Solver-3). arXiv: https://arxiv.org/abs/2309.06169.
+
     Code reference: https://github.com/QinpengCui/ER-SDE-Solver/blob/main/er_sde_solver.py.
     """
     extra_args = {} if extra_args is None else extra_args
@@ -1462,12 +1463,18 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None
     noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
     s_in = x.new_ones([x.shape[0]])

-    def default_noise_scaler(sigma):
-        return sigma * ((sigma ** 0.3).exp() + 10.0)
-    noise_scaler = default_noise_scaler if noise_scaler is None else noise_scaler
+    def default_er_sde_noise_scaler(x):
+        return x * ((x ** 0.3).exp() + 10.0)
+
+    noise_scaler = default_er_sde_noise_scaler if noise_scaler is None else noise_scaler
     num_integration_points = 200.0
     point_indice = torch.arange(0, num_integration_points, dtype=torch.float32, device=x.device)

+    model_sampling = model.inner_model.model_patcher.get_model_object("model_sampling")
+    sigmas = offset_first_sigma_for_snr(sigmas, model_sampling)
+    half_log_snrs = sigma_to_half_log_snr(sigmas, model_sampling)
+    er_lambdas = half_log_snrs.neg().exp()  # er_lambda_t = sigma_t / alpha_t
+
     old_denoised = None
     old_denoised_d = None

@@ -1478,32 +1485,36 @@ def sample_er_sde(model, x, sigmas, extra_args=None, callback=None, disable=None
         stage_used = min(max_stage, i + 1)
         if sigmas[i + 1] == 0:
             x = denoised
-        elif stage_used == 1:
-            r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i])
-            x = r * x + (1 - r) * denoised
         else:
-            r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i])
-            x = r * x + (1 - r) * denoised
+            er_lambda_s, er_lambda_t = er_lambdas[i], er_lambdas[i + 1]
+            alpha_s = sigmas[i] / er_lambda_s
+            alpha_t = sigmas[i + 1] / er_lambda_t
+            r_alpha = alpha_t / alpha_s
+            r = noise_scaler(er_lambda_t) / noise_scaler(er_lambda_s)

-            dt = sigmas[i + 1] - sigmas[i]
-            sigma_step_size = -dt / num_integration_points
-            sigma_pos = sigmas[i + 1] + point_indice * sigma_step_size
-            scaled_pos = noise_scaler(sigma_pos)
+            # Stage 1 Euler
+            x = r_alpha * r * x + alpha_t * (1 - r) * denoised

-            # Stage 2
-            s = torch.sum(1 / scaled_pos) * sigma_step_size
-            denoised_d = (denoised - old_denoised) / (sigmas[i] - sigmas[i - 1])
-            x = x + (dt + s * noise_scaler(sigmas[i + 1])) * denoised_d
+            if stage_used >= 2:
+                dt = er_lambda_t - er_lambda_s
+                lambda_step_size = -dt / num_integration_points
+                lambda_pos = er_lambda_t + point_indice * lambda_step_size
+                scaled_pos = noise_scaler(lambda_pos)

-            if stage_used >= 3:
-                # Stage 3
-                s_u = torch.sum((sigma_pos - sigmas[i]) / scaled_pos) * sigma_step_size
-                denoised_u = (denoised_d - old_denoised_d) / ((sigmas[i] - sigmas[i - 2]) / 2)
-                x = x + ((dt ** 2) / 2 + s_u * noise_scaler(sigmas[i + 1])) * denoised_u
-            old_denoised_d = denoised_d
+                # Stage 2
+                s = torch.sum(1 / scaled_pos) * lambda_step_size
+                denoised_d = (denoised - old_denoised) / (er_lambda_s - er_lambdas[i - 1])
+                x = x + alpha_t * (dt + s * noise_scaler(er_lambda_t)) * denoised_d

-        if s_noise != 0 and sigmas[i + 1] > 0:
-            x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * (sigmas[i + 1] ** 2 - sigmas[i] ** 2 * r ** 2).sqrt().nan_to_num(nan=0.0)
+                if stage_used >= 3:
+                    # Stage 3
+                    s_u = torch.sum((lambda_pos - er_lambda_s) / scaled_pos) * lambda_step_size
+                    denoised_u = (denoised_d - old_denoised_d) / ((er_lambda_s - er_lambdas[i - 2]) / 2)
+                    x = x + alpha_t * ((dt ** 2) / 2 + s_u * noise_scaler(er_lambda_t)) * denoised_u
+                old_denoised_d = denoised_d
+
+            if s_noise > 0:
+                x = x + alpha_t * noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * (er_lambda_t ** 2 - er_lambda_s ** 2 * r ** 2).sqrt().nan_to_num(nan=0.0)
         old_denoised = denoised
     return x
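Why this rewrite is a strict generalization rather than a behavior change for existing models: per the diff's own comment, er_lambda_t = sigma_t / alpha_t (i.e. exp of the negated half log-SNR), and for VE models alpha_t == 1, so r_alpha == 1 and the new Stage-1 update collapses to the old VE-only one. A minimal sketch with made-up numbers, assuming a VE model:

import math

def noise_scaler(v):  # same default scaler as sample_er_sde
    return v * (math.exp(v ** 0.3) + 10.0)

sigma_s, sigma_t = 10.0, 7.0
alpha_s = alpha_t = 1.0  # VE assumption
er_lambda_s, er_lambda_t = sigma_s / alpha_s, sigma_t / alpha_t

r = noise_scaler(er_lambda_t) / noise_scaler(er_lambda_s)
r_alpha = alpha_t / alpha_s  # == 1 under VE

x, denoised = 1.5, 0.3
new_update = r_alpha * r * x + alpha_t * (1 - r) * denoised  # patched formula
old_update = r * x + (1 - r) * denoised                      # pre-patch VE formula
assert math.isclose(new_update, old_update)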
comfy_extras/nodes_custom_sampler.py

@@ -2,6 +2,7 @@ import math
 import comfy.samplers
 import comfy.sample
 from comfy.k_diffusion import sampling as k_diffusion_sampling
+from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
 import latent_preview
 import torch
 import comfy.utils
@@ -480,6 +481,46 @@ class SamplerDPMAdaptative:
                                       "s_noise":s_noise })
         return (sampler, )


+class SamplerER_SDE(ComfyNodeABC):
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "solver_type": (IO.COMBO, {"options": ["ER-SDE", "Reverse-time SDE", "ODE"]}),
+                "max_stage": (IO.INT, {"default": 3, "min": 1, "max": 3}),
+                "eta": (
+                    IO.FLOAT,
+                    {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False, "tooltip": "Stochastic strength of reverse-time SDE.\nWhen eta=0, it reduces to deterministic ODE. This setting doesn't apply to ER-SDE solver type."},
+                ),
+                "s_noise": (IO.FLOAT, {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01, "round": False}),
+            }
+        }
+
+    RETURN_TYPES = (IO.SAMPLER,)
+    CATEGORY = "sampling/custom_sampling/samplers"
+
+    FUNCTION = "get_sampler"
+
+    def get_sampler(self, solver_type, max_stage, eta, s_noise):
+        if solver_type == "ODE" or (solver_type == "Reverse-time SDE" and eta == 0):
+            eta = 0
+            s_noise = 0
+
+        def reverse_time_sde_noise_scaler(x):
+            return x ** (eta + 1)
+
+        if solver_type == "ER-SDE":
+            # Use the default one in sample_er_sde()
+            noise_scaler = None
+        else:
+            noise_scaler = reverse_time_sde_noise_scaler
+
+        sampler_name = "er_sde"
+        sampler = comfy.samplers.ksampler(sampler_name, {"s_noise": s_noise, "noise_scaler": noise_scaler, "max_stage": max_stage})
+        return (sampler,)
+
+
 class Noise_EmptyNoise:
     def __init__(self):
         self.seed = 0
@@ -787,6 +828,7 @@ NODE_CLASS_MAPPINGS = {
     "SamplerDPMPP_SDE": SamplerDPMPP_SDE,
     "SamplerDPMPP_2S_Ancestral": SamplerDPMPP_2S_Ancestral,
     "SamplerDPMAdaptative": SamplerDPMAdaptative,
+    "SamplerER_SDE": SamplerER_SDE,
     "SplitSigmas": SplitSigmas,
     "SplitSigmasDenoise": SplitSigmasDenoise,
     "FlipSigmas": FlipSigmas,
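How the new node's solver_type maps onto noise_scaler: "ER-SDE" passes None and keeps the default scaler inside sample_er_sde(), while "Reverse-time SDE" uses x ** (eta + 1). With eta = 0 that scaler is the identity, so r = er_lambda_t / er_lambda_s and, together with s_noise = 0, the sampler runs as a deterministic ODE. A small sketch of that limit, with made-up lambda values:

import math

def reverse_time_sde_noise_scaler(x, eta=0.0):
    return x ** (eta + 1)

lambda_s, lambda_t = 10.0, 7.0
r = reverse_time_sde_noise_scaler(lambda_t) / reverse_time_sde_noise_scaler(lambda_s)
assert math.isclose(r, lambda_t / lambda_s)  # identity scaler at eta = 0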
comfy_extras/nodes_load_3d.py

@@ -5,6 +5,8 @@ import os
 from comfy.comfy_types import IO
 from comfy_api.input_impl import VideoFromFile

+from pathlib import Path
+

 def normalize_path(path):
     return path.replace('\\', '/')
@@ -16,7 +18,14 @@ class Load3D():

         os.makedirs(input_dir, exist_ok=True)

-        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.obj', '.fbx', '.stl'))]
+        input_path = Path(input_dir)
+        base_path = Path(folder_paths.get_input_directory())
+
+        files = [
+            normalize_path(str(file_path.relative_to(base_path)))
+            for file_path in input_path.rglob("*")
+            if file_path.suffix.lower() in {'.gltf', '.glb', '.obj', '.fbx', '.stl'}
+        ]

         return {"required": {
             "model_file": (sorted(files), {"file_upload": True}),
@@ -61,7 +70,14 @@ class Load3DAnimation():

         os.makedirs(input_dir, exist_ok=True)

-        files = [normalize_path(os.path.join("3d", f)) for f in os.listdir(input_dir) if f.endswith(('.gltf', '.glb', '.fbx'))]
+        input_path = Path(input_dir)
+        base_path = Path(folder_paths.get_input_directory())
+
+        files = [
+            normalize_path(str(file_path.relative_to(base_path)))
+            for file_path in input_path.rglob("*")
+            if file_path.suffix.lower() in {'.gltf', '.glb', '.fbx'}
+        ]

         return {"required": {
             "model_file": (sorted(files), {"file_upload": True}),
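The behavioral change in both loaders: os.listdir() only saw files directly inside the folder, while Path.rglob("*") recurses into subfolders, and suffix.lower() makes the extension check case-insensitive. A standalone sketch of the new listing logic (directory and file names here are hypothetical):

from pathlib import Path

def list_model_files(input_dir, base_dir, exts):
    base = Path(base_dir)
    return sorted(
        str(p.relative_to(base)).replace('\\', '/')
        for p in Path(input_dir).rglob("*")
        if p.suffix.lower() in exts
    )

# e.g. list_model_files("input/3d", "input", {'.gltf', '.glb', '.obj', '.fbx', '.stl'})
# now also finds "3d/scenes/Chair.GLB", which the old one-level, case-sensitive scan missed.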
comfy_extras/nodes_perpneg.py

@@ -4,6 +4,7 @@ import comfy.sampler_helpers
 import comfy.samplers
 import comfy.utils
 import node_helpers
+import math

 def perp_neg(x, noise_pred_pos, noise_pred_neg, noise_pred_nocond, neg_scale, cond_scale):
     pos = noise_pred_pos - noise_pred_nocond
@@ -69,6 +70,12 @@ class Guider_PerpNeg(comfy.samplers.CFGGuider):
         negative_cond = self.conds.get("negative", None)
         empty_cond = self.conds.get("empty_negative_prompt", None)
+
+        if model_options.get("disable_cfg1_optimization", False) == False:
+            if math.isclose(self.neg_scale, 0.0):
+                negative_cond = None
+            if math.isclose(self.cfg, 1.0):
+                empty_cond = None

         conds = [positive_cond, negative_cond, empty_cond]

         out = comfy.samplers.calc_cond_batch(self.inner_model, conds, x, timestep, model_options)
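Why Guider_PerpNeg can drop these conds: when neg_scale is 0 the perpendicular-negative term is scaled to zero, and when cfg is 1 the unconditional ("empty") prediction cancels out of the standard CFG combination, so evaluating those batch entries would waste a model call. A plain-float sketch of the cfg == 1 case (illustrative, not the guider's exact code):

import math

def cfg_combine(uncond, cond, cfg):
    return uncond + cfg * (cond - uncond)

assert math.isclose(cfg_combine(0.2, 0.8, 1.0), 0.8)  # uncond has no effect at cfg = 1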
comfy_extras/nodes_tcfg.py (new file, 71 lines)

@@ -0,0 +1,71 @@
+# TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)
+
+import torch
+
+from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
+
+
+def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
+    """Drop tangential components from uncond score to align with cond score."""
+    # (B, 1, ...)
+    batch_num = cond_score.shape[0]
+    cond_score_flat = cond_score.reshape(batch_num, 1, -1).float()
+    uncond_score_flat = uncond_score.reshape(batch_num, 1, -1).float()
+
+    # Score matrix A (B, 2, ...)
+    score_matrix = torch.cat((uncond_score_flat, cond_score_flat), dim=1)
+    try:
+        _, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
+    except RuntimeError:
+        # Fallback to CPU
+        _, _, Vh = torch.linalg.svd(score_matrix.cpu(), full_matrices=False)
+
+    # Drop the tangential components
+    v1 = Vh[:, 0:1, :].to(uncond_score_flat.device)  # (B, 1, ...)
+    uncond_score_td = (uncond_score_flat @ v1.transpose(-2, -1)) * v1
+    return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype)
+
+
+class TCFG(ComfyNodeABC):
+    @classmethod
+    def INPUT_TYPES(cls) -> InputTypeDict:
+        return {
+            "required": {
+                "model": (IO.MODEL, {}),
+            }
+        }
+
+    RETURN_TYPES = (IO.MODEL,)
+    RETURN_NAMES = ("patched_model",)
+    FUNCTION = "patch"
+
+    CATEGORY = "advanced/guidance"
+    DESCRIPTION = "TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality."
+
+    def patch(self, model):
+        m = model.clone()
+
+        def tangential_damping_cfg(args):
+            # Assume [cond, uncond, ...]
+            x = args["input"]
+            conds_out = args["conds_out"]
+            if len(conds_out) <= 1 or None in args["conds"][:2]:
+                # Skip when either cond or uncond is None
+                return conds_out
+            cond_pred = conds_out[0]
+            uncond_pred = conds_out[1]
+            uncond_td = score_tangential_damping(x - cond_pred, x - uncond_pred)
+            uncond_pred_td = x - uncond_td
+            return [cond_pred, uncond_pred_td] + conds_out[2:]
+
+        m.set_model_sampler_pre_cfg_function(tangential_damping_cfg)
+        return (m,)
+
+
+NODE_CLASS_MAPPINGS = {
+    "TCFG": TCFG,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "TCFG": "Tangential Damping CFG",
+}
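Geometric reading of score_tangential_damping: the SVD of the stacked 2 x D score matrix yields a leading right-singular vector v1, and the damped uncond score is the projection of the flattened uncond score onto span(v1); everything tangential to that shared principal direction is dropped. A small self-contained sketch with toy 2-D scores (not real model outputs):

import torch

uncond = torch.tensor([[3.0, 0.0]])  # (B, D)
cond = torch.tensor([[1.0, 1.0]])
score_matrix = torch.stack((uncond, cond), dim=1)  # (B, 2, D)
_, _, Vh = torch.linalg.svd(score_matrix, full_matrices=False)
v1 = Vh[:, 0:1, :]  # leading right-singular vector, (B, 1, D)
uncond_td = (uncond.unsqueeze(1) @ v1.transpose(-2, -1)) * v1
print(uncond_td.squeeze(1))  # uncond projected onto the principal direction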