diff --git a/.ci/windows_base_files/README_VERY_IMPORTANT.txt b/.ci/windows_base_files/README_VERY_IMPORTANT.txt
index d46acbcbf..8ab70c890 100755
--- a/.ci/windows_base_files/README_VERY_IMPORTANT.txt
+++ b/.ci/windows_base_files/README_VERY_IMPORTANT.txt
@@ -4,6 +4,9 @@ if you have a NVIDIA gpu:
 
 run_nvidia_gpu.bat
 
+if you want to enable the fast fp16 accumulation (faster for fp16 models with slightly less quality):
+
+run_nvidia_gpu_fast_fp16_accumulation.bat
 
 
 To run it in slow CPU mode:
diff --git a/comfy_extras/nodes_cfg.py b/comfy_extras/nodes_cfg.py
index 1fb686644..5abdc115a 100644
--- a/comfy_extras/nodes_cfg.py
+++ b/comfy_extras/nodes_cfg.py
@@ -40,6 +40,33 @@ class CFGZeroStar:
         m.set_model_sampler_post_cfg_function(cfg_zero_star)
         return (m, )
 
+class CFGNorm:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"model": ("MODEL",),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("MODEL",)
+    RETURN_NAMES = ("patched_model",)
+    FUNCTION = "patch"
+    CATEGORY = "advanced/guidance"
+    EXPERIMENTAL = True
+
+    def patch(self, model, strength):
+        m = model.clone()
+        def cfg_norm(args):
+            cond_p = args['cond_denoised']
+            pred_text_ = args["denoised"]
+
+            norm_full_cond = torch.norm(cond_p, dim=1, keepdim=True)
+            norm_pred_text = torch.norm(pred_text_, dim=1, keepdim=True)
+            scale = (norm_full_cond / (norm_pred_text + 1e-8)).clamp(min=0.0, max=1.0)
+            return pred_text_ * scale * strength
+
+        m.set_model_sampler_post_cfg_function(cfg_norm)
+        return (m, )
+
 NODE_CLASS_MAPPINGS = {
-    "CFGZeroStar": CFGZeroStar
+    "CFGZeroStar": CFGZeroStar,
+    "CFGNorm": CFGNorm,
 }
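Note on the CFGNorm patch above: after CFG runs, it rescales the guided prediction so its per-channel norm never exceeds the norm of the conditional prediction, then multiplies by strength. A minimal standalone sketch of that rescaling, using plain tensors instead of ComfyUI's post-CFG args dict (the function name and tensor shapes below are illustrative, not part of the codebase):

import torch

def cfg_norm_sketch(denoised, cond_denoised, strength=1.0):
    # Norm over the channel dimension (dim=1), kept for broadcasting.
    norm_cond = torch.norm(cond_denoised, dim=1, keepdim=True)
    norm_pred = torch.norm(denoised, dim=1, keepdim=True)
    # The ratio is clamped at 1.0, so the guided prediction is only ever shrunk.
    scale = (norm_cond / (norm_pred + 1e-8)).clamp(min=0.0, max=1.0)
    return denoised * scale * strength

# Toy check: a CFG result that overshoots the conditional prediction is pulled back to its norm.
cond = torch.randn(2, 4, 8, 8)
pred = cond * 1.5
print(cfg_norm_sketch(pred, cond).norm(dim=1).mean().item(), cond.norm(dim=1).mean().item())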
if model_options.get("disable_cfg1_optimization", False) == False: - if math.isclose(self.cfg2, 1.0): - negative_cond = None - if math.isclose(self.cfg1, 1.0): - middle_cond = None - out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) - return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1 + if self.nested: + out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) + pred_text = comfy.samplers.cfg_function(self.inner_model, out[2], out[1], self.cfg1, x, timestep, model_options=model_options, cond=positive_cond, uncond=middle_cond) + return out[0] + self.cfg2 * (pred_text - out[0]) + else: + if model_options.get("disable_cfg1_optimization", False) == False: + if math.isclose(self.cfg2, 1.0): + negative_cond = None + if math.isclose(self.cfg1, 1.0): + middle_cond = None + + out = comfy.samplers.calc_cond_batch(self.inner_model, [negative_cond, middle_cond, positive_cond], x, timestep, model_options) + return comfy.samplers.cfg_function(self.inner_model, out[1], out[0], self.cfg2, x, timestep, model_options=model_options, cond=middle_cond, uncond=negative_cond) + (out[2] - out[1]) * self.cfg1 class DualCFGGuider: @classmethod @@ -714,6 +750,7 @@ class DualCFGGuider: "negative": ("CONDITIONING", ), "cfg_conds": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), "cfg_cond2_negative": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01}), + "style": (["regular", "nested"],), } } @@ -722,10 +759,10 @@ class DualCFGGuider: FUNCTION = "get_guider" CATEGORY = "sampling/custom_sampling/guiders" - def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative): + def get_guider(self, model, cond1, cond2, negative, cfg_conds, cfg_cond2_negative, style): guider = Guider_DualCFG(model) guider.set_conds(cond1, cond2, negative) - guider.set_cfg(cfg_conds, cfg_cond2_negative) + guider.set_cfg(cfg_conds, cfg_cond2_negative, nested=(style == "nested")) return (guider,) class DisableNoise: @@ -879,6 +916,7 @@ NODE_CLASS_MAPPINGS = { "FlipSigmas": FlipSigmas, "SetFirstSigma": SetFirstSigma, "ExtendIntermediateSigmas": ExtendIntermediateSigmas, + "SamplingPercentToSigma": SamplingPercentToSigma, "CFGGuider": CFGGuider, "DualCFGGuider": DualCFGGuider, diff --git a/requirements.txt b/requirements.txt index 52444a303..3619b6bfa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ comfyui-frontend-package==1.23.4 -comfyui-workflow-templates==0.1.36 +comfyui-workflow-templates==0.1.37 comfyui-embedded-docs==0.2.4 comfyui_manager torch