Mirror of https://github.com/comfyanonymous/ComfyUI.git

Commit f143a803d6: Merge branch 'comfyanonymous:master' into master
@@ -1080,7 +1080,6 @@ def sample_euler_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disabl
         d = to_d(x, sigma_hat, temp[0])
         if callback is not None:
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigma_hat, 'denoised': denoised})
-        dt = sigmas[i + 1] - sigma_hat
         # Euler method
         x = denoised + d * sigmas[i + 1]
     return x
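The only change in this hunk is the removal of the unused `dt` assignment: the CFG++ Euler update steps straight from the denoised prediction (`x = denoised + d * sigmas[i + 1]`) instead of integrating `x + d * dt`, so the step size was dead code. Below is a minimal, self-contained sketch of the two update forms with dummy tensors and an inlined `to_d`; it is illustrative only, not ComfyUI's exact loop.

import torch

def to_d(x, sigma, denoised):
    # Karras-style conversion of a denoised prediction into an ODE derivative.
    return (x - denoised) / sigma

# Hypothetical stand-ins for one iteration of the sampler loop.
x = torch.randn(1, 4, 8, 8)
denoised = torch.randn_like(x)          # CFG-combined prediction
uncond_denoised = torch.randn_like(x)   # plays the role of temp[0] in the real sampler
sigma_hat, sigma_next = 14.6, 10.0

# Plain Euler step: needs the step size dt explicitly.
d = to_d(x, sigma_hat, denoised)
x_euler = x + d * (sigma_next - sigma_hat)

# CFG++ Euler step (as in the hunk above): restart from `denoised`, so dt is never read.
d = to_d(x, sigma_hat, uncond_denoised)
x_cfg_pp = denoised + d * sigma_next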
@@ -1107,7 +1106,6 @@ def sample_euler_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=No
             callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
         d = to_d(x, sigmas[i], temp[0])
         # Euler method
-        dt = sigma_down - sigmas[i]
         x = denoised + d * sigma_down
         if sigmas[i + 1] > 0:
             x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
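The same dead `dt` is dropped from the ancestral CFG++ sampler. For context on the quantities it references, here is a small sketch of one ancestral step, assuming the standard k-diffusion split of the step into a deterministic target `sigma_down` and re-injected noise `sigma_up`; all names and values below are illustrative, not ComfyUI's exact code.

import torch

def get_ancestral_step(sigma_from, sigma_to, eta=1.0):
    # Standard k-diffusion split: part of the variance is covered deterministically
    # (down to sigma_down), the rest is re-injected as fresh noise (sigma_up).
    if not eta:
        return sigma_to, 0.0
    sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
    sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
    return sigma_down, sigma_up

# One illustrative step with dummy tensors.
x = torch.randn(1, 4, 8, 8)
denoised = torch.randn_like(x)          # CFG-combined prediction
uncond_denoised = torch.randn_like(x)   # temp[0] in the real sampler
sigma_i, sigma_next = 10.0, 7.0
sigma_down, sigma_up = get_ancestral_step(sigma_i, sigma_next)

d = (x - uncond_denoised) / sigma_i              # to_d(x, sigmas[i], temp[0])
x = denoised + d * sigma_down                    # deterministic CFG++ move; no dt needed
if sigma_next > 0:
    x = x + torch.randn_like(x) * sigma_up       # re-inject noise (noise_sampler in the real code)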
@@ -1138,7 +1136,6 @@ def sample_dpmpp_2s_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback
         if sigma_down == 0:
             # Euler method
             d = to_d(x, sigmas[i], temp[0])
-            dt = sigma_down - sigmas[i]
             x = denoised + d * sigma_down
         else:
             # DPM-Solver++(2S)
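In the 2S ancestral sampler the deleted `dt` sat in the `sigma_down == 0` fallback branch, where the CFG++ Euler step degenerates to returning the denoised prediction directly, since the derivative is multiplied by zero. A tiny illustration with dummy tensors (names are placeholders):

import torch

x = torch.randn(1, 4, 8, 8)
denoised = torch.randn_like(x)
uncond_denoised = torch.randn_like(x)
sigma_i, sigma_down = 0.03, 0.0

d = (x - uncond_denoised) / sigma_i
x_new = denoised + d * sigma_down     # sigma_down == 0, so the step just returns `denoised`
assert torch.equal(x_new, denoised)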
@@ -1186,4 +1183,4 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis
             denoised_mix = -torch.exp(-h) * uncond_denoised - torch.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
         x = denoised + denoised_mix + torch.exp(-h) * x
         old_uncond_denoised = uncond_denoised
     return x
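The 2M hunk carries no textual change; its line numbers only shift because of the deletions above. For readers tracing where `h` and `r` come from, here is a hedged sketch following the usual k-diffusion conventions with `t = -log(sigma)`; the scalar `math.exp`/`math.expm1` calls stand in for the tensor ops in the real sampler, and all names below are assumptions rather than part of the diff.

import math
import torch

# Dummy stand-ins for one loop iteration (i >= 1, so an older prediction exists).
x = torch.randn(1, 4, 8, 8)
denoised = torch.randn_like(x)
uncond_denoised = torch.randn_like(x)
old_uncond_denoised = torch.randn_like(x)
sigma_prev, sigma_i, sigma_next = 12.0, 10.0, 8.0

t_fn = lambda sigma: -math.log(sigma)   # log-sigma time variable, as in k-diffusion's 2M sampler
t, t_next = t_fn(sigma_i), t_fn(sigma_next)
h = t_next - t                          # current step size in t
h_last = t - t_fn(sigma_prev)           # previous step size in t
r = h_last / h

# The two update lines shown in the hunk above, with scalar math.* in place of torch.* on tensors:
denoised_mix = -math.exp(-h) * uncond_denoised - math.expm1(-h) * (1 / (2 * r)) * (denoised - old_uncond_denoised)
x = denoised + denoised_mix + math.exp(-h) * x
old_uncond_denoised = uncond_denoised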
@@ -405,7 +405,7 @@ class SDTokenizer:
     def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True, min_length=None, pad_token=None, tokenizer_data={}):
         if tokenizer_path is None:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
-        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
+        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, clean_up_tokenization_spaces=True)  # Fix Transformers FutureWarning by explicitly setting clean_up_tokenization_spaces to True
         self.max_length = max_length
         self.min_length = min_length
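The tokenizer change passes `clean_up_tokenization_spaces=True` explicitly so newer `transformers` releases stop emitting a FutureWarning about the default changing. A minimal usage sketch; the model id below is a placeholder for illustration, since ComfyUI actually loads its bundled `sd1_tokenizer` directory via `tokenizer_path`.

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "openai/clip-vit-large-patch14",       # placeholder source; the real code passes a local tokenizer_path
    clean_up_tokenization_spaces=True,     # explicit value, so no FutureWarning about the changing default
)
tokens = tokenizer("a photo of a cat")["input_ids"]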