diff --git a/app/frontend_management.py b/app/frontend_management.py
index 9c832e46d..191408aca 100644
--- a/app/frontend_management.py
+++ b/app/frontend_management.py
@@ -151,6 +151,15 @@ class FrontendManager:
             return cls.DEFAULT_FRONTEND_PATH
 
         repo_owner, repo_name, version = cls.parse_version_string(version_string)
+
+        if version.startswith("v"):
+            expected_path = str(Path(cls.CUSTOM_FRONTENDS_ROOT) / f"{repo_owner}_{repo_name}" / version.lstrip("v"))
+            if os.path.exists(expected_path):
+                logging.info(f"Using existing copy of specific frontend version tag: {repo_owner}/{repo_name}@{version}")
+                return expected_path
+
+        logging.info(f"Initializing frontend: {repo_owner}/{repo_name}@{version}, requesting version details from GitHub...")
+
         provider = provider or FrontEndProvider(repo_owner, repo_name)
         release = provider.get_release(version)
 
@@ -159,16 +168,20 @@ class FrontendManager:
             Path(cls.CUSTOM_FRONTENDS_ROOT) / provider.folder_name / semantic_version
         )
         if not os.path.exists(web_root):
+            # Use tmp path until complete to avoid path exists check passing from interrupted downloads
+            tmp_path = web_root + ".tmp"
             try:
-                os.makedirs(web_root, exist_ok=True)
+                os.makedirs(tmp_path, exist_ok=True)
                 logging.info(
                     "Downloading frontend(%s) version(%s) to (%s)",
                     provider.folder_name,
                     semantic_version,
-                    web_root,
+                    tmp_path,
                 )
                 logging.debug(release)
-                download_release_asset_zip(release, destination_path=web_root)
+                download_release_asset_zip(release, destination_path=tmp_path)
+                if os.listdir(tmp_path):
+                    os.rename(tmp_path, web_root)
             finally:
                 # Clean up the directory if it is empty, i.e. the download failed
                 if not os.listdir(web_root):
diff --git a/comfy/sd1_clip.py b/comfy/sd1_clip.py
index 6f574900f..bb240526f 100644
--- a/comfy/sd1_clip.py
+++ b/comfy/sd1_clip.py
@@ -405,7 +405,7 @@ class SDTokenizer:
     def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, pad_to_max_length=True, min_length=None, pad_token=None, tokenizer_data={}):
         if tokenizer_path is None:
             tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "sd1_tokenizer")
-        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path, clean_up_tokenization_spaces=True) # Fix Transformers FutureWarning by explicitly setting clean_up_tokenization_spaces to True
+        self.tokenizer = tokenizer_class.from_pretrained(tokenizer_path)
         self.max_length = max_length
         self.min_length = min_length
 
diff --git a/comfy_extras/nodes_latent.py b/comfy_extras/nodes_latent.py
index 1c271b827..af2736818 100644
--- a/comfy_extras/nodes_latent.py
+++ b/comfy_extras/nodes_latent.py
@@ -1,4 +1,5 @@
 import comfy.utils
+import comfy_extras.nodes_post_processing
 import torch
 
 def reshape_latent_to(target_shape, latent):
@@ -221,6 +222,55 @@ class LatentOperationTonemapReinhard:
                 return normalized_latent * new_magnitude
             return (tonemap_reinhard,)
 
+class LatentOperationSharpen:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "sharpen_radius": ("INT", {
+                "default": 9,
+                "min": 1,
+                "max": 31,
+                "step": 1
+            }),
+            "sigma": ("FLOAT", {
+                "default": 1.0,
+                "min": 0.1,
+                "max": 10.0,
+                "step": 0.1
+            }),
+            "alpha": ("FLOAT", {
+                "default": 0.1,
+                "min": 0.0,
+                "max": 5.0,
+                "step": 0.01
+            }),
+        }}
+
+    RETURN_TYPES = ("LATENT_OPERATION",)
+    FUNCTION = "op"
+
+    CATEGORY = "latent/advanced/operations"
+    EXPERIMENTAL = True
+
+    def op(self, sharpen_radius, sigma, alpha):
+        def sharpen(latent, **kwargs):
+            luminance = (torch.linalg.vector_norm(latent, dim=(1)) + 1e-6)[:,None]
+            normalized_latent = latent / luminance
+            channels = latent.shape[1]
+
+            kernel_size = sharpen_radius * 2 + 1
+            kernel = comfy_extras.nodes_post_processing.gaussian_kernel(kernel_size, sigma, device=luminance.device)
+            center = kernel_size // 2
+
+            kernel *= alpha * -10
+            kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0
+
+            padded_image = torch.nn.functional.pad(normalized_latent, (sharpen_radius,sharpen_radius,sharpen_radius,sharpen_radius), 'reflect')
+            sharpened = torch.nn.functional.conv2d(padded_image, kernel.repeat(channels, 1, 1).unsqueeze(1), padding=kernel_size // 2, groups=channels)[:,:,sharpen_radius:-sharpen_radius, sharpen_radius:-sharpen_radius]
+
+            return luminance * sharpened
+        return (sharpen,)
+
 NODE_CLASS_MAPPINGS = {
     "LatentAdd": LatentAdd,
     "LatentSubtract": LatentSubtract,
@@ -231,4 +281,5 @@ NODE_CLASS_MAPPINGS = {
     "LatentApplyOperation": LatentApplyOperation,
     "LatentApplyOperationCFG": LatentApplyOperationCFG,
     "LatentOperationTonemapReinhard": LatentOperationTonemapReinhard,
+    "LatentOperationSharpen": LatentOperationSharpen,
 }
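Note: the following is a minimal, standalone sketch (not part of the diff) of the kernel construction that the sharpen() closure in LatentOperationSharpen relies on. The gaussian_kernel and sharpen_kernel helpers below are hypothetical local stand-ins (gaussian_kernel approximates the helper imported from comfy_extras.nodes_post_processing); the key point is that the Gaussian surround is scaled by -10 * alpha and the center weight is then chosen so the whole kernel sums to 1.0, i.e. an unsharp-mask-style filter that leaves flat regions unchanged.

import torch

def gaussian_kernel(kernel_size, sigma, device=None):
    # 2D Gaussian normalized to sum to 1 (stand-in for the helper used in the diff).
    x, y = torch.meshgrid(
        torch.linspace(-1, 1, kernel_size, device=device),
        torch.linspace(-1, 1, kernel_size, device=device),
        indexing="ij",
    )
    d = torch.sqrt(x * x + y * y)
    g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
    return g / g.sum()

def sharpen_kernel(sharpen_radius=9, sigma=1.0, alpha=0.1):
    # Same construction as sharpen() above: negative Gaussian surround,
    # then a center weight that forces the kernel to sum to exactly 1.0.
    kernel_size = sharpen_radius * 2 + 1
    kernel = gaussian_kernel(kernel_size, sigma)
    center = kernel_size // 2
    kernel *= alpha * -10
    kernel[center, center] = kernel[center, center] - kernel.sum() + 1.0
    return kernel

print(float(sharpen_kernel().sum()))  # ~1.0, so overall latent magnitude is preserved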