mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-10 06:10:50 +08:00

commit cf2eaedc5b
parent 5be969f20a

    Fix tests
@@ -31,7 +31,7 @@ extension-pkg-allow-list=
 # be loaded. Extensions are loading into the active Python interpreter and may
 # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
 # for backward compatibility.)
-extension-pkg-whitelist=
+extension-pkg-whitelist=cv2
 
 # Return non-zero exit code if any of these messages/categories are detected,
 # even if score is above --fail-under value. Syntax same as enable. Messages
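Note: cv2 is a compiled extension module, so pylint cannot statically discover its members and reports false no-member errors on valid code. A minimal illustration (not part of the commit) of the kind of access this whitelist entry legalizes:

    import cv2

    # without extension-pkg-whitelist=cv2 (or an inline disable), pylint
    # flags this attribute access as no-member even though it exists at runtime
    jet = cv2.COLORMAP_JET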
@@ -63,6 +63,7 @@ def get_or_download(folder_name: str, filename: str, known_files: List[HuggingFi
         hf_destination_dir = this_model_directory
 
         # converted 16 bit files should be skipped
+        # todo: the file size should be replaced with a file hash
         path = os.path.join(hf_destination_dir, known_file.filename)
         try:
             file_size = os.stat(path, follow_symlinks=True).st_size if os.path.isfile(path) else None
@@ -73,6 +74,7 @@ def get_or_download(folder_name: str, filename: str, known_files: List[HuggingFi
 
             path = hf_hub_download(repo_id=known_file.repo_id,
                                    filename=known_file.filename,
+                                   # todo: in the latest huggingface implementation, this causes files to be downloaded as though the destination is the cache dir, rather than a local directory linking to a cache dir
                                    local_dir=hf_destination_dir,
                                    repo_type=known_file.repo_type,
                                    revision=known_file.revision,
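Note: read together, these two hunks show the download-or-reuse shape of get_or_download. A condensed, self-contained sketch of that pattern — the function name ensure_local_copy is hypothetical, the known_file attributes are taken from the hunks, and treating any existing file as "already present" is this sketch's simplification:

    import os
    from huggingface_hub import hf_hub_download

    def ensure_local_copy(known_file, hf_destination_dir: str) -> str:
        # reuse the local file when it already exists; per the todo above,
        # a content hash would be a stricter check than size alone
        path = os.path.join(hf_destination_dir, known_file.filename)
        file_size = os.stat(path, follow_symlinks=True).st_size if os.path.isfile(path) else None
        if file_size is not None:
            return path
        # otherwise fetch from the hub into the destination directory
        return hf_hub_download(repo_id=known_file.repo_id,
                               filename=known_file.filename,
                               local_dir=hf_destination_dir,
                               repo_type=known_file.repo_type,
                               revision=known_file.revision)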
@@ -68,8 +68,8 @@ class ImageApplyColorMap(CustomNode):
                 colored_image = normalized_depth_uint8
             else:
                 cv2_colormap = getattr(cv2, colormap)
-                colored_image = cv2.applyColorMap(normalized_depth_uint8, cv2_colormap)
-                colored_image_rgb = cv2.cvtColor(colored_image, cv2.COLOR_BGR2RGB)
+                colored_image = cv2.applyColorMap(normalized_depth_uint8, cv2_colormap) # pylint: disable=no-member
+                colored_image_rgb = cv2.cvtColor(colored_image, cv2.COLOR_BGR2RGB) # pylint: disable=no-member
             rgb_tensor = torch.tensor(colored_image_rgb) * 1.0 / 255.0
             colored_images.append(rgb_tensor)
 
@@ -1,3 +1,5 @@
+import hashlib
+
 import torch
 import comfy.model_management
 from comfy.cmd import folder_paths
@@ -38,6 +40,7 @@ class VAEEncodeAudio:
     def encode(self, vae, audio):
         sample_rate = audio["sample_rate"]
        if 44100 != sample_rate:
+            import torchaudio
            waveform = torchaudio.functional.resample(audio["waveform"], sample_rate, 44100)
        else:
            waveform = audio["waveform"]
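Note: VAEEncodeAudio resamples only when the input is not already at 44100 Hz, the rate this audio VAE expects. The same torchaudio call in isolation:

    import torch
    import torchaudio

    waveform = torch.randn(1, 2, 32000)  # (batch, channels, samples) at 32 kHz
    resampled = torchaudio.functional.resample(waveform, 32000, 44100)
    # sample count is rescaled by 44100/32000; channels are unchanged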
@@ -232,28 +232,6 @@ class SamplerDPMPP_3M_SDE:
         sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise})
         return (sampler, )
 
-class SamplerDPMPP_3M_SDE:
-    @classmethod
-    def INPUT_TYPES(s):
-        return {"required":
-                    {"eta": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
-                     "s_noise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step":0.01, "round": False}),
-                     "noise_device": (['gpu', 'cpu'], ),
-                    }
-               }
-    RETURN_TYPES = ("SAMPLER",)
-    CATEGORY = "sampling/custom_sampling/samplers"
-
-    FUNCTION = "get_sampler"
-
-    def get_sampler(self, eta, s_noise, noise_device):
-        if noise_device == 'cpu':
-            sampler_name = "dpmpp_3m_sde"
-        else:
-            sampler_name = "dpmpp_3m_sde_gpu"
-        sampler = comfy.samplers.ksampler(sampler_name, {"eta": eta, "s_noise": s_noise})
-        return (sampler, )
-
 class SamplerDPMPP_2M_SDE:
     @classmethod
     def INPUT_TYPES(s):
@@ -3,7 +3,7 @@
 import math
 from einops import rearrange
 # Use torch rng for consistency across generations
-from torch import randint
+from torch import randint # pylint: disable=no-name-in-module
 
 def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
     min_value = min(min_value, value)
@@ -4,6 +4,9 @@ import comfy.latent_formats
 import torch
 
 class LCM(comfy.model_sampling.EPS):
+    def timestep(self, *args, **kwargs) -> torch.Tensor:
+        pass
+
     def calculate_denoised(self, sigma, model_output, model_input):
         timestep = self.timestep(sigma).view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
         sigma = sigma.view(sigma.shape[:1] + (1,) * (model_output.ndim - 1))
@@ -84,6 +87,7 @@ class ModelSamplingDiscrete:
     def patch(self, model, sampling, zsnr):
         m = model.clone()
 
+        sampling_type = "eps"
         sampling_base = comfy.model_sampling.ModelSamplingDiscrete
         if sampling == "eps":
             sampling_type = comfy.model_sampling.EPS
@@ -176,6 +180,7 @@ class ModelSamplingContinuousEDM:
 
         latent_format = None
         sigma_data = 1.0
+        sampling_type = comfy.model_sampling.EPS
         if sampling == "eps":
             sampling_type = comfy.model_sampling.EPS
         elif sampling == "v_prediction":
@@ -214,6 +219,7 @@ class ModelSamplingContinuousV:
 
         latent_format = None
         sigma_data = 1.0
+        sampling_type = comfy.model_sampling.EPS
         if sampling == "v_prediction":
             sampling_type = comfy.model_sampling.V_PREDICTION
 
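Note: each patch hunk seeds sampling_type with a default before its if/elif chain, so the name is bound on every path (otherwise static analysis flags it as possibly undefined when no branch matches). The shape of that fix, schematically:

    import comfy.model_sampling

    def pick_sampling_type(sampling: str):
        # default assignment keeps sampling_type bound even if no branch matches
        sampling_type = comfy.model_sampling.EPS
        if sampling == "v_prediction":
            sampling_type = comfy.model_sampling.V_PREDICTION
        return sampling_type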
@@ -182,7 +182,7 @@ def save_checkpoint(model, clip=None, vae=None, clip_vision=None, filename_prefi
         metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
     elif isinstance(model.model, model_base.SVD_img2vid):
         metadata["modelspec.architecture"] = "stable-video-diffusion-img2vid-v1"
-    elif isinstance(model.model, comfy.model_base.SD3):
+    elif isinstance(model.model, model_base.SD3):
         metadata["modelspec.architecture"] = "stable-diffusion-v3-medium" #TODO: other SD3 variants
     else:
         enable_modelspec = False
@@ -52,7 +52,7 @@ class ModelMergeSDXL(nodes_model_merging.ModelMergeBlocks):
 
         return {"required": arg_dict}
 
-class ModelMergeSD3_2B(comfy_extras.nodes_model_merging.ModelMergeBlocks):
+class ModelMergeSD3_2B(nodes_model_merging.ModelMergeBlocks):
     CATEGORY = "advanced/model_merging/model_specific"
 
     @classmethod
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 from comfy.cmd import folder_paths
-from comfy import clip_model
+from comfy import clip_model, model_management, utils
 from comfy import clip_vision
 from comfy import ops
 
@@ -90,9 +90,9 @@ class FuseModule(nn.Module):
 
 class PhotoMakerIDEncoder(clip_model.CLIPVisionModelProjection):
     def __init__(self):
-        self.load_device = comfy.model_management.text_encoder_device()
-        offload_device = comfy.model_management.text_encoder_offload_device()
-        dtype = comfy.model_management.text_encoder_dtype(self.load_device)
+        self.load_device = model_management.text_encoder_device()
+        offload_device = model_management.text_encoder_offload_device()
+        dtype = model_management.text_encoder_dtype(self.load_device)
 
         super().__init__(VISION_CONFIG_DICT, dtype, offload_device, ops.manual_cast)
         self.visual_projection_2 = ops.manual_cast.Linear(1024, 1280, bias=False)
@@ -128,7 +128,7 @@ class PhotoMakerLoader:
     def load_photomaker_model(self, photomaker_model_name):
         photomaker_model_path = folder_paths.get_full_path("photomaker", photomaker_model_name)
         photomaker_model = PhotoMakerIDEncoder()
-        data = comfy.utils.load_torch_file(photomaker_model_path, safe_load=True)
+        data = utils.load_torch_file(photomaker_model_path, safe_load=True)
         if "id_encoder" in data:
             data = data["id_encoder"]
         photomaker_model.load_state_dict(data)
@@ -181,6 +181,8 @@ class Quantize:
         elif dither.startswith("bayer"):
             order = int(dither.split('-')[-1])
             quantized_image = Quantize.bayer(im, pal_im, order)
+        else:
+            raise ValueError(f"dither was unexpected value {dither}")
 
         quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
         result[b] = quantized_array
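Note: without a final else, an unrecognized dither string would fall through the branch and leave quantized_image unbound when the convert("RGB") line runs; raising ValueError makes the failure immediate and descriptive. The bug class, reduced to a toy example (not the node's real code):

    def quantize_branch(dither: str):
        if dither.startswith("bayer"):
            quantized_image = "bayer result"  # stand-in for Quantize.bayer(...)
        # with no else branch, any other value reaches this line with
        # quantized_image never assigned, raising UnboundLocalError here
        return quantized_image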
@@ -8,7 +8,7 @@ from comfy.model_downloader import get_filename_list_with_downloadable, KNOWN_UP
 
 
 try:
-    from spandrel_extra_arches import EXTRA_REGISTRY
+    from spandrel_extra_arches import EXTRA_REGISTRY # pylint: disable=import-error
     from spandrel import MAIN_REGISTRY
     MAIN_REGISTRY.add(*EXTRA_REGISTRY)
     logging.info("Successfully imported spandrel_extra_arches: support for non commercial upscale models.")
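Note: spandrel_extra_arches is an optional dependency here; the surrounding try block already tolerates its absence at runtime, and the inline disable silences pylint's static import-error in environments where it is not installed. The same optional-import shape reduced to a skeleton (the except clause and fallback value are this sketch's assumptions, since the hunk does not show them):

    try:
        from spandrel_extra_arches import EXTRA_REGISTRY  # pylint: disable=import-error
    except ImportError:
        EXTRA_REGISTRY = None  # optional extra; the feature stays disabled without it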
@@ -2,16 +2,6 @@ import numpy as np
 import pytest
 import torch
 
-from comfy import model_management
-from comfy.model_management import CPUState
-
-try:
-    has_gpu = torch.device(torch.cuda.current_device()) is not None
-except:
-    has_gpu = False
-
-model_management.cpu_state = CPUState.GPU if has_gpu else CPUState.CPU
-
 from comfy.nodes.base_nodes import ImagePadForOutpaint, ImageBatch, ImageInvert, ImageScaleBy, ImageScale, LatentCrop, \
     LatentComposite, LatentFlip, LatentRotate, LatentUpscaleBy, LatentUpscale, InpaintModelConditioning, CLIPTextEncode, \
     VAEEncodeForInpaint, VAEEncode, VAEDecode, ConditioningSetMask, ConditioningSetArea, ConditioningCombine, \
@@ -60,20 +50,24 @@ def test_conditioning_set_mask(clip):
     assert cond[0][1]["mask_strength"] == 1.0
 
 
-def test_vae_decode(vae):
+def test_vae_decode(vae, has_gpu):
+    if not has_gpu:
+        pytest.skip("requires gpu for performant testing")
     decoded, = VAEDecode().decode(vae, _latent)
     assert decoded.shape == (1, 512, 512, 3)
 
 
-@pytest.mark.skipif(not has_gpu, reason="requires gpu for performant testing")
-def test_vae_encode(vae):
+def test_vae_encode(has_gpu, vae):
+    if not has_gpu:
+        pytest.skip("requires gpu for performant testing")
     latent, = VAEEncode().encode(vae, _image_512x512)
     assert "samples" in latent
     assert latent["samples"].shape == (1, 4, 64, 64)
 
 
-@pytest.mark.skipif(not has_gpu, reason="requires gpu for performant testing")
-def test_vae_encode_for_inpaint(vae):
+def test_vae_encode_for_inpaint(has_gpu, vae):
+    if not has_gpu:
+        pytest.skip("requires gpu for performant testing")
     mask = torch.ones((1, 512, 512))
     latent, = VAEEncodeForInpaint().encode(vae, _image_512x512, mask)
     assert "samples" in latent
@@ -82,8 +76,9 @@ def test_vae_encode_for_inpaint(vae):
     assert torch.allclose(latent["noise_mask"], mask)
 
 
-@pytest.mark.skipif(not has_gpu, reason="requires gpu for performant testing")
-def test_inpaint_model_conditioning(model, vae, clip):
+def test_inpaint_model_conditioning(model, vae, clip, has_gpu):
+    if not has_gpu:
+        pytest.skip("requires gpu for performant testing")
     cond_pos, = CLIPTextEncode().encode(clip, "test prompt")
     cond_neg, = CLIPTextEncode().encode(clip, "test negative prompt")
     pos, neg, latent = InpaintModelConditioning().encode(cond_pos, cond_neg, _image_512x512, vae, torch.ones((1, 512, 512)))
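Note: this is the core of the "Fix tests" change. The module-level has_gpu probe — which initialized CUDA at import time and mutated model_management.cpu_state as a side effect — is removed, and the skipif decorators that depended on it become an injected has_gpu fixture checked inside each test body. The fixture definition itself is not part of this diff; a plausible conftest.py sketch reusing the probe logic deleted above:

    # hypothetical conftest.py; the actual fixture is defined elsewhere in the repo
    import pytest
    import torch

    @pytest.fixture(scope="session")
    def has_gpu() -> bool:
        # same probe the old module-level code used, now deferred until a test asks
        try:
            return torch.device(torch.cuda.current_device()) is not None
        except Exception:
            return False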
@@ -21,40 +21,43 @@ _image_1x1 = torch.zeros((1, 1, 3), dtype=torch.float32, device="cpu")
 def test_save_image_response():
     assert SaveImagesResponse.INPUT_TYPES() is not None
     n = SaveImagesResponse()
-    result = n.execute(images=[_image_1x1], uris=["with_prefix/1.png"], name="test")
+    ui_node_ret_dict = n.execute(images=[_image_1x1], uris=["with_prefix/1.png"], name="test")
     assert os.path.isfile(os.path.join(folder_paths.get_output_directory(), "with_prefix/1.png"))
-    assert len(result["result"]) == 1
-    assert len(result["ui"]["images"]) == 1
-    assert result["result"][0]["filename"] == "1.png"
-    assert result["result"][0]["subfolder"] == "with_prefix"
-    assert result["result"][0]["name"] == "test"
+    assert len(ui_node_ret_dict["result"]) == 1
+    assert len(ui_node_ret_dict["ui"]["images"]) == 1
+    image_result, = ui_node_ret_dict["result"]
+    assert image_result[0]["filename"] == "1.png"
+    assert image_result[0]["subfolder"] == "with_prefix"
+    assert image_result[0]["name"] == "test"
 
 
 def test_save_image_response_abs_local_uris():
     assert SaveImagesResponse.INPUT_TYPES() is not None
     n = SaveImagesResponse()
-    result = n.execute(images=[_image_1x1], uris=[os.path.join(folder_paths.get_output_directory(), "with_prefix/1.png")], name="test")
+    ui_node_ret_dict = n.execute(images=[_image_1x1], uris=[os.path.join(folder_paths.get_output_directory(), "with_prefix/1.png")], name="test")
     assert os.path.isfile(os.path.join(folder_paths.get_output_directory(), "with_prefix/1.png"))
-    assert len(result["result"]) == 1
-    assert len(result["ui"]["images"]) == 1
-    assert result["result"][0]["filename"] == "1.png"
-    assert result["result"][0]["subfolder"] == "with_prefix"
-    assert result["result"][0]["name"] == "test"
+    assert len(ui_node_ret_dict["result"]) == 1
+    assert len(ui_node_ret_dict["ui"]["images"]) == 1
+    image_result, = ui_node_ret_dict["result"]
+    assert image_result[0]["filename"] == "1.png"
+    assert image_result[0]["subfolder"] == "with_prefix"
+    assert image_result[0]["name"] == "test"
 
 
 def test_save_image_response_remote_uris():
     n = SaveImagesResponse()
     uri = "memory://some_folder/1.png"
-    result = n.execute(images=[_image_1x1], uris=[uri])
-    assert len(result["result"]) == 1
-    assert len(result["ui"]["images"]) == 1
-    filename_ = result["result"][0]["filename"]
+    ui_node_ret_dict = n.execute(images=[_image_1x1], uris=[uri])
+    assert len(ui_node_ret_dict["result"]) == 1
+    assert len(ui_node_ret_dict["ui"]["images"]) == 1
+    image_result, = ui_node_ret_dict["result"]
+    filename_ = image_result[0]["filename"]
     assert filename_ != "1.png"
     assert filename_ != ""
     assert uuid.UUID(filename_.replace(".png", "")) is not None
     assert os.path.isfile(os.path.join(folder_paths.get_output_directory(), filename_))
-    assert result["result"][0]["abs_path"] == uri
-    assert result["result"][0]["subfolder"] == ""
+    assert image_result[0]["abs_path"] == uri
+    assert image_result[0]["subfolder"] == ""
 
 
 def test_save_exif():
@@ -72,13 +75,14 @@ def test_save_exif():
 def test_no_local_file():
     n = SaveImagesResponse()
     uri = "memory://some_folder/2.png"
-    result = n.execute(images=[_image_1x1], uris=[uri], local_uris=["/dev/null"])
-    assert len(result["result"]) == 1
-    assert len(result["ui"]["images"]) == 1
-    assert result["result"][0]["filename"] == ""
-    assert not os.path.isfile(os.path.join(folder_paths.get_output_directory(), result["result"][0]["filename"]))
-    assert result["result"][0]["abs_path"] == uri
-    assert result["result"][0]["subfolder"] == ""
+    ui_node_ret_dict = n.execute(images=[_image_1x1], uris=[uri], local_uris=["/dev/null"])
+    assert len(ui_node_ret_dict["result"]) == 1
+    assert len(ui_node_ret_dict["ui"]["images"]) == 1
+    image_result, = ui_node_ret_dict["result"]
+    assert image_result[0]["filename"] == ""
+    assert not os.path.isfile(os.path.join(folder_paths.get_output_directory(), image_result[0]["filename"]))
+    assert image_result[0]["abs_path"] == uri
+    assert image_result[0]["subfolder"] == ""
 
 
 def test_int_request_parameter():
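Note: across these tests, renaming result to ui_node_ret_dict distinguishes the node's UI return dictionary from the per-image records inside it, and the single-target unpack doubles as an assertion. In general:

    items = [{"filename": "1.png"}]
    item, = items  # single-target unpack: raises ValueError unless len(items) == 1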