Merge branch 'master' of github.com:comfyanonymous/ComfyUI

This commit is contained in:
doctorpangloss 2025-02-24 09:39:26 -08:00
commit 693038738a
90 changed files with 56292 additions and 5808 deletions

View File

@ -29,6 +29,7 @@ A vanilla, up-to-date fork of [ComfyUI](https://github.com/comfyanonymous/comfyu
- [AuraFlow](https://comfyanonymous.github.io/ComfyUI_examples/aura_flow/)
- [HunyuanDiT](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_dit/)
- [Flux](https://comfyanonymous.github.io/ComfyUI_examples/flux/)
- [Lumina Image 2.0](https://comfyanonymous.github.io/ComfyUI_examples/lumina2/)
- Video Models
- [Stable Video Diffusion](https://comfyanonymous.github.io/ComfyUI_examples/video/)
- [Mochi](https://comfyanonymous.github.io/ComfyUI_examples/mochi/)
@ -1307,7 +1308,7 @@ For any bugs, issues, or feature requests related to the frontend, please use th
The new frontend is now the default for ComfyUI. However, please note:
1. The frontend in the main ComfyUI repository is updated weekly.
1. The frontend in the main ComfyUI repository is updated fortnightly.
2. Daily releases are available in the separate frontend repository.
To use the most up-to-date frontend version:
@ -1324,7 +1325,7 @@ To use the most up-to-date frontend version:
--front-end-version Comfy-Org/ComfyUI_frontend@1.2.2
```
This approach allows you to easily switch between the stable weekly release and the cutting-edge daily updates, or even specific versions for testing purposes.
This approach allows you to easily switch between the stable fortnightly release and the cutting-edge daily updates, or even specific versions for testing purposes.
### Accessing the Legacy Frontend
@ -1338,7 +1339,7 @@ This will use a snapshot of the legacy frontend preserved in the [ComfyUI Legacy
## Community
[Chat on Matrix: #comfyui_space:matrix.org](https://app.element.io/#/room/%23comfyui_space%3Amatrix.org), an alternative to Discord.
[Discord](https://comfy.org/discord): Try the #help or #feedback channels.
## Known Issues

View File

@ -1 +1 @@
__version__ = "0.3.11"
__version__ = "0.3.15"

View File

@ -1,13 +1,9 @@
from typing import Optional
from aiohttp import web
from ...services.file_service import FileService
from ...services.terminal_service import TerminalService
from ....app import logger
from ....cmd.folder_paths import models_dir, user_directory, output_directory, \
folder_names_and_paths # pylint: disable=import-error
from typing import Optional
from folder_paths import folder_names_and_paths, get_directory_by_type
from api_server.services.terminal_service import TerminalService
import app.logger
import os
class InternalRoutes:
'''
@ -19,35 +15,19 @@ class InternalRoutes:
def __init__(self, prompt_server):
self.routes: web.RouteTableDef = web.RouteTableDef()
self._app: Optional[web.Application] = None
self.file_service = FileService({
"models": models_dir,
"user": user_directory,
"output": output_directory
})
self.prompt_server = prompt_server
self.terminal_service = TerminalService(prompt_server)
def setup_routes(self):
@self.routes.get('/files')
async def list_files(request):
directory_key = request.query.get('directory', '')
try:
file_list = self.file_service.list_files(directory_key)
return web.json_response({"files": file_list})
except ValueError as e:
return web.json_response({"error": str(e)}, status=400)
except Exception as e:
return web.json_response({"error": str(e)}, status=500)
@self.routes.get('/logs')
async def get_logs(request):
return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in logger.get_logs()]))
return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))
@self.routes.get('/logs/raw')
async def get_logs_raw(request):
async def get_raw_logs(request):
self.terminal_service.update_size()
return web.json_response({
"entries": list(logger.get_logs()),
"entries": list(app.logger.get_logs()),
"size": {"cols": self.terminal_service.cols, "rows": self.terminal_service.rows}
})
@ -63,6 +43,7 @@ class InternalRoutes:
return web.Response(status=200)
@self.routes.get('/folder_paths')
async def get_folder_paths(request):
response = {}
@ -70,6 +51,20 @@ class InternalRoutes:
response[key] = folder_names_and_paths[key][0]
return web.json_response(response)
@self.routes.get('/files/{directory_type}')
async def get_files(request: web.Request) -> web.Response:
directory_type = request.match_info['directory_type']
if directory_type not in ("output", "input", "temp"):
return web.json_response({"error": "Invalid directory type"}, status=400)
directory = get_directory_by_type(directory_type)
sorted_files = sorted(
(entry for entry in os.scandir(directory) if entry.is_file()),
key=lambda entry: -entry.stat().st_mtime
)
return web.json_response([entry.name for entry in sorted_files], status=200)
def get_app(self):
if self._app is None:
self._app = web.Application()
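The `/files/{directory_type}` route above replaces the removed `FileService`-backed `/files?directory=` endpoint. A minimal sketch of querying it from a client, assuming a local server on the default port and that these internal routes are mounted under `/internal` (both assumptions, not shown in this diff):

```python
# Sketch only: host, port, and the "/internal" prefix are assumptions.
import json
import urllib.request

def list_files(directory_type: str = "output") -> list[str]:
    # Only "output", "input", and "temp" are accepted; anything else returns 400.
    url = f"http://127.0.0.1:8188/internal/files/{directory_type}"
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read())  # names sorted newest-first by mtime

print(list_files("output"))
```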

View File

@ -1,15 +0,0 @@
from typing import Dict, List, Optional
from ..utils.file_operations import FileSystemOperations, FileSystemItem
class FileService:
def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None):
self.allowed_directories: Dict[str, str] = allowed_directories
self.file_system_ops: FileSystemOperations = file_system_ops or FileSystemOperations()
def list_files(self, directory_key: str) -> List[FileSystemItem]:
if directory_key not in self.allowed_directories:
raise ValueError("Invalid directory key")
directory_path: str = self.allowed_directories[directory_key]
return self.file_system_ops.walk_directory(directory_path)

View File

@ -35,12 +35,13 @@ def _create_parser() -> EnhancedConfigArgParser:
parser.add_argument("--enable-cors-header", type=str, default=None, metavar="ORIGIN", nargs="?", const="*",
help="Enable CORS (Cross-Origin Resource Sharing) with optional origin or allow all with default '*'.")
parser.add_argument("--max-upload-size", type=float, default=100, help="Set the maximum upload size in MB.")
parser.add_argument("--base-directory", type=str, default=None, help="Set the ComfyUI base directory for models, custom_nodes, input, output, temp, and user directories.")
parser.add_argument("--extra-model-paths-config", type=str, default=None, metavar="PATH", nargs='+',
action='append', help="Load one or more extra_model_paths.yaml files.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory. Overrides --base-directory.")
parser.add_argument("--temp-directory", type=str, default=None,
help="Set the ComfyUI temp directory (default is in the ComfyUI directory).")
parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory.")
help="Set the ComfyUI temp directory (default is in the ComfyUI directory). Overrides --base-directory.")
parser.add_argument("--input-directory", type=str, default=None, help="Set the ComfyUI input directory. Overrides --base-directory.")
parser.add_argument("--auto-launch", action="store_true",
help="Automatically launch ComfyUI in the default browser.")
parser.add_argument("--disable-auto-launch", action="store_true", help="Disable auto launching the browser.")
@ -250,7 +251,9 @@ def _create_parser() -> EnhancedConfigArgParser:
env_var="ANTHROPIC_API_KEY"
)
parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path.")
parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path. Overrides --base-directory.")
parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.")
# now give plugins a chance to add configuration
for entry_point in entry_points().select(group='comfyui.custom_config'):
@ -284,6 +287,9 @@ def _parse_args(parser: Optional[argparse.ArgumentParser] = None, args_parsing:
if args.disable_auto_launch:
args.auto_launch = False
if args.force_fp16:
args.fp16_unet = True
configuration_obj = Configuration(**vars(args))
configuration_obj.config_files = config_files
assert all(isinstance(config_file, str) for config_file in config_files)

View File

@ -40,6 +40,7 @@ class Configuration(dict):
config_files (Optional[List[str]]): Path to the configuration file(s) that were set in the arguments.
cwd (Optional[str]): Working directory. Defaults to the current directory. This is always treated as a base path for model files, and it will be the place where model files are downloaded.
base_paths (Optional[list[str]]): Additional base paths for custom nodes, models and inputs.
base_directory (Optional[str]): Set the ComfyUI base directory for models, custom_nodes, input, output, temp, and user directories.
listen (str): IP address to listen on. Defaults to "127.0.0.1".
port (int): Port number for the server to listen on. Defaults to 8188.
enable_cors_header (Optional[str]): Enables CORS with the specified origin.
@ -123,6 +124,7 @@ class Configuration(dict):
user_directory (Optional[str]): Set the ComfyUI user directory with an absolute path.
log_stdout (bool): Send normal process output to stdout instead of stderr (the default destination).
panic_when (list[str]): List of fully qualified exception class names to panic (sys.exit(1)) when a workflow raises it.
enable_compress_response_body (bool): Enable compressing response body.
"""
def __init__(self, **kwargs):
@ -131,9 +133,11 @@ class Configuration(dict):
self.config_files = []
self.cwd: Optional[str] = None
self.base_paths: list[str] = []
self.base_directory: Optional[str] = None
self.listen: str = "127.0.0.1"
self.port: int = 8188
self.enable_cors_header: Optional[str] = None
self.enable_compress_response_body: bool = False
self.max_upload_size: float = 100.0
self.extra_model_paths_config: Optional[List[str]] = []
self.output_directory: Optional[str] = None

View File

@ -103,9 +103,10 @@ class CLIPTextModel_(torch.nn.Module):
mask = None
if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
causal_mask = torch.full((x.shape[1], x.shape[1]), -torch.finfo(x.dtype).max, dtype=x.dtype, device=x.device).triu_(1)
causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
if mask is not None:
mask += causal_mask
else:
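The swap from `float("-inf")` to `-torch.finfo(x.dtype).max` above avoids NaNs: a softmax row that is entirely `-inf` evaluates to `0/0`, while a row of large-but-finite sentinels stays well defined. A standalone sketch of the difference, not the model code:

```python
# Standalone illustration of why a finite mask sentinel is safer than -inf.
import torch

row_inf = torch.full((4,), float("-inf"))
row_fin = torch.full((4,), -torch.finfo(torch.float32).max)

print(torch.softmax(row_inf, dim=-1))  # tensor([nan, nan, nan, nan])
print(torch.softmax(row_fin, dim=-1))  # tensor([0.25, 0.25, 0.25, 0.25])
```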

View File

@ -65,7 +65,7 @@ def init_default_paths(folder_names_and_paths: FolderNames, configuration: Optio
from ..cmd.main_pre import args
configuration = configuration or args
base_paths = [Path(configuration.cwd) if configuration.cwd is not None else None] + configuration.base_paths
base_paths = [Path(configuration.cwd) if configuration.cwd is not None else None] + [Path(configuration.base_directory) if configuration.base_directory is not None else None] + configuration.base_paths
base_paths = [Path(path) for path in base_paths if path is not None]
if len(base_paths) == 0:
base_paths = [Path(os.getcwd())]

View File

@ -80,6 +80,19 @@ async def cache_control(request: web.Request, handler):
return response
@web.middleware
async def compress_body(request: web.Request, handler):
accept_encoding = request.headers.get("Accept-Encoding", "")
response: web.Response = await handler(request)
if not isinstance(response, web.Response):
return response
if response.content_type not in ["application/json", "text/plain"]:
return response
if response.body and "gzip" in accept_encoding:
response.enable_compression()
return response
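The middleware only compresses when the client advertises gzip and the response is JSON or plain text, and it is opt-in via `--enable-compress-response-body`. A sketch of exercising it, assuming a local server started with that flag; the route is used only as an example of a JSON endpoint:

```python
# Sketch: assumes a server started with --enable-compress-response-body;
# "/object_info" is illustrative.
import gzip
import json
import urllib.request

req = urllib.request.Request(
    "http://127.0.0.1:8188/object_info",
    headers={"Accept-Encoding": "gzip"},
)
with urllib.request.urlopen(req) as resp:
    body = resp.read()
    if resp.headers.get("Content-Encoding") == "gzip":
        body = gzip.decompress(body)  # urllib does not auto-decompress
print(len(json.loads(body)))
```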
def create_cors_middleware(allowed_origin: str):
@web.middleware
async def cors_middleware(request: web.Request, handler):
@ -169,7 +182,8 @@ class PromptServer(ExecutorToClientProgress):
PromptServer.instance = self
mimetypes.init()
mimetypes.types_map['.js'] = 'application/javascript; charset=utf-8'
mimetypes.add_type('application/javascript; charset=utf-8', '.js')
mimetypes.add_type('image/webp', '.webp')
self.address: str = "0.0.0.0"
self.user_manager = UserManager()
@ -188,6 +202,9 @@ class PromptServer(ExecutorToClientProgress):
self.background_tasks: dict[str, Task] = dict()
middlewares = [cache_control]
if args.enable_compress_response_body:
middlewares.append(compress_body)
if args.enable_cors_header:
middlewares.append(create_cors_middleware(args.enable_cors_header))
else:

View File

@ -66,13 +66,26 @@ class IO(StrEnum):
b = frozenset(value.split(","))
return not (b.issubset(a) or a.issubset(b))
class RemoteInputOptions(TypedDict):
route: str
"""The route to the remote source."""
refresh_button: bool
"""Specifies whether to show a refresh button in the UI below the widget."""
control_after_refresh: Literal["first", "last"]
"""Specifies the control after the refresh button is clicked. If "first", the first item will be automatically selected, and so on."""
timeout: int
"""The maximum amount of time to wait for a response from the remote source in milliseconds."""
max_retries: int
"""The maximum number of retries before aborting the request."""
refresh: int
"""The TTL of the remote input's value in milliseconds. Specifies the interval at which the remote input's value is refreshed."""
class InputTypeOptions(TypedDict):
"""Provides type hinting for the return type of the INPUT_TYPES node function.
Due to IDE limitations with unions, for now all options are available for all types (e.g. `label_on` is hinted even when the type is not `IO.BOOLEAN`).
Comfy Docs: https://docs.comfy.org/essentials/custom_node_datatypes
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/datatypes
"""
default: bool | str | float | int | list | tuple
@ -113,6 +126,14 @@ class InputTypeOptions(TypedDict):
# defaultVal: str
dynamicPrompts: bool
"""Causes the front-end to evaluate dynamic prompts (``STRING``)"""
# class InputTypeCombo(InputTypeOptions):
image_upload: bool
"""Specifies whether the input should have an image upload button and image preview attached to it. Requires that the input's name is `image`."""
image_folder: Literal["input", "output", "temp"]
"""Specifies which folder to get preview images from if the input has the ``image_upload`` flag.
"""
remote: RemoteInputOptions
"""Specifies the configuration for a remote input."""
class HiddenInputTypeDict(TypedDict):
@ -133,7 +154,7 @@ class HiddenInputTypeDict(TypedDict):
class InputTypeDict(TypedDict):
"""Provides type hinting for node INPUT_TYPES.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs
"""
required: dict[str, tuple[IO, InputTypeOptions]]
@ -143,14 +164,14 @@ class InputTypeDict(TypedDict):
hidden: HiddenInputTypeDict
"""Offers advanced functionality and server-client communication.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/more_on_inputs#hidden-inputs
"""
class ComfyNodeABC(ABC):
"""Abstract base class for Comfy nodes. Includes the names and expected types of attributes.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview
"""
DESCRIPTION: str
@ -167,7 +188,7 @@ class ComfyNodeABC(ABC):
CATEGORY: str
"""The category of the node, as per the "Add Node" menu.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#category
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#category
"""
EXPERIMENTAL: bool
"""Flags a node as experimental, informing users that it may change or not work as expected."""
@ -181,9 +202,9 @@ class ComfyNodeABC(ABC):
* Must include the ``required`` key, which describes all inputs that must be connected for the node to execute.
* The ``optional`` key can be added to describe inputs which do not need to be connected.
* The ``hidden`` key offers some advanced functionality. More info at: https://docs.comfy.org/essentials/custom_node_more_on_inputs#hidden-inputs
* The ``hidden`` key offers some advanced functionality. More info at: https://docs.comfy.org/custom-nodes/backend/more_on_inputs#hidden-inputs
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#input-types
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#input-types
"""
return {"required": {}}
@ -198,7 +219,7 @@ class ComfyNodeABC(ABC):
By default, a node is not considered an output. Set ``OUTPUT_NODE = True`` to specify that it is.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#output-node
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#output-node
"""
INPUT_IS_LIST: bool
"""A flag indicating if this node implements the additional code necessary to deal with OUTPUT_IS_LIST nodes.
@ -209,7 +230,7 @@ class ComfyNodeABC(ABC):
A node can also override the default input behaviour and receive the whole list in a single call. This is done by setting a class attribute `INPUT_IS_LIST` to ``True``.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing
"""
OUTPUT_IS_LIST: tuple[bool]
"""A tuple indicating which node outputs are lists, but will be connected to nodes that expect individual items.
@ -227,7 +248,7 @@ class ComfyNodeABC(ABC):
the node should provide a class attribute `OUTPUT_IS_LIST`, which is a ``tuple[bool]``, of the same length as `RETURN_TYPES`,
specifying which outputs should be so treated.
Comfy Docs: https://docs.comfy.org/essentials/custom_node_lists#list-processing
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lists#list-processing
"""
RETURN_TYPES: tuple[IO]
@ -237,19 +258,19 @@ class ComfyNodeABC(ABC):
RETURN_TYPES = (IO.INT, "INT", "CUSTOM_TYPE")
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-types
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-types
"""
RETURN_NAMES: tuple[str]
"""The output slot names for each item in `RETURN_TYPES`, e.g. ``RETURN_NAMES = ("count", "filter_string")``
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#return-names
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#return-names
"""
OUTPUT_TOOLTIPS: tuple[str]
"""A tuple of strings to use as tooltips for node outputs, one for each item in `RETURN_TYPES`."""
FUNCTION: str
"""The name of the function to execute as a literal string, e.g. `FUNCTION = "execute"`
Comfy Docs: https://docs.comfy.org/essentials/custom_node_server_overview#function
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/server_overview#function
"""
@ -267,7 +288,7 @@ class CheckLazyMixin:
Params should match the node's execution ``FUNCTION`` (self, and all inputs by name).
Will be executed repeatedly until it returns an empty list, or all requested items were already evaluated (and sent as params).
Comfy Docs: https://docs.comfy.org/essentials/custom_node_lazy_evaluation#defining-check-lazy-status
Comfy Docs: https://docs.comfy.org/custom-nodes/backend/lazy_evaluation#defining-check-lazy-status
"""
need = [name for name in kwargs if kwargs[name] is None]

View File

@ -13,7 +13,7 @@ from typing import Any, NamedTuple, Optional, Iterable
from .platform_path import construct_path
supported_pt_extensions = frozenset(['.ckpt', '.pt', '.bin', '.pth', '.safetensors', '.pkl', '.sft', ".index.json"])
supported_pt_extensions = frozenset(['.ckpt', '.pt', '.pt2', '.bin', '.pth', '.safetensors', '.pkl', '.sft', ".index.json"])
extension_mimetypes_cache = {
"webp": "image",
}

View File

@ -7,7 +7,7 @@ import yaml
def load_extra_path_config(yaml_path):
from .cmd import folder_paths
with open(yaml_path, 'r') as stream:
with open(yaml_path, 'r', encoding='utf-8') as stream:
config = yaml.safe_load(stream)
yaml_dir = os.path.dirname(os.path.abspath(yaml_path))
for c in config:
@ -32,5 +32,6 @@ def load_extra_path_config(yaml_path):
full_path = os.path.join(base_path, full_path)
elif not os.path.isabs(full_path):
full_path = os.path.abspath(os.path.join(yaml_dir, y))
logging.info("Adding extra search path {} {}".format(x, full_path))
folder_paths.add_model_folder_path(x, full_path, is_default=is_default)
normalized_path = os.path.normpath(full_path)
logging.info("Adding extra search path {} {}".format(x, normalized_path))
folder_paths.add_model_folder_path(x, normalized_path, is_default=is_default)
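Normalizing before registering keeps one canonical form per path, so duplicate-separator and relative-segment variants from hand-edited YAML collapse to the same entry. For example:

```python
# Standalone illustration of what os.path.normpath does to typical YAML paths.
import os

print(os.path.normpath("models/loras/../loras/./styles"))  # models/loras/styles
print(os.path.normpath("models//checkpoints/"))            # models/checkpoints
```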

View File

@ -1311,7 +1311,7 @@ def sample_dpmpp_2m_cfg_pp(model, x, sigmas, extra_args=None, callback=None, dis
return x
@torch.no_grad()
def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None, cfg_pp=False):
def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None, eta=1., cfg_pp=False):
extra_args = {} if extra_args is None else extra_args
seed = extra_args.get("seed", None)
noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
@ -1333,53 +1333,60 @@ def res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None
extra_args["model_options"] = model_patcher.set_model_options_post_cfg_function(model_options, post_cfg_function, disable_cfg1_optimization=True)
for i in trange(len(sigmas) - 1, disable=disable):
if s_churn > 0:
gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
sigma_hat = sigmas[i] * (gamma + 1)
else:
gamma = 0
sigma_hat = sigmas[i]
if gamma > 0:
eps = torch.randn_like(x) * s_noise
x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
denoised = model(x, sigma_hat * s_in, **extra_args)
denoised = model(x, sigmas[i] * s_in, **extra_args)
sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
if callback is not None:
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
if sigmas[i + 1] == 0 or old_denoised is None:
callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
if sigma_down == 0 or old_denoised is None:
# Euler method
if cfg_pp:
d = to_d(x, sigma_hat, uncond_denoised)
x = denoised + d * sigmas[i + 1]
d = to_d(x, sigmas[i], uncond_denoised)
x = denoised + d * sigma_down
else:
d = to_d(x, sigma_hat, denoised)
dt = sigmas[i + 1] - sigma_hat
d = to_d(x, sigmas[i], denoised)
dt = sigma_down - sigmas[i]
x = x + d * dt
else:
# Second order multistep method in https://arxiv.org/pdf/2308.02157
t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigmas[i + 1]), t_fn(sigmas[i - 1])
t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
h = t_next - t
c2 = (t_prev - t) / h
phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
b1 = torch.nan_to_num(phi1_val - 1.0 / c2 * phi2_val, nan=0.0)
b2 = torch.nan_to_num(1.0 / c2 * phi2_val, nan=0.0)
b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)
if cfg_pp:
x = x + (denoised - uncond_denoised)
x = sigma_fn(h) * x + h * (b1 * uncond_denoised + b2 * old_denoised)
else:
x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised)
x = (sigma_fn(t_next) / sigma_fn(t)) * x + h * (b1 * denoised + b2 * old_denoised)
# Noise addition
if sigmas[i + 1] > 0:
x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
old_denoised = denoised
if cfg_pp:
old_denoised = uncond_denoised
else:
old_denoised = denoised
return x
@torch.no_grad()
def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=False)
def sample_res_multistep(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=False)
@torch.no_grad()
def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_churn=0., s_tmin=0., s_tmax=float('inf'), s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise, noise_sampler=noise_sampler, cfg_pp=True)
def sample_res_multistep_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=True)
@torch.no_grad()
def sample_res_multistep_ancestral(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=False)
@torch.no_grad()
def sample_res_multistep_ancestral_cfg_pp(model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1., s_noise=1., noise_sampler=None):
return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=eta, cfg_pp=True)
@torch.no_grad()
def sample_gradient_estimation(model, x, sigmas, extra_args=None, callback=None, disable=None, ge_gamma=2.):
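The rewrite drops the `s_churn` noise injection and instead routes stochasticity through the ancestral `sigma_down`/`sigma_up` split, so `eta=0.` keeps the plain samplers deterministic while the new `_ancestral` variants add noise each step. A sketch of the usual k-diffusion split the code relies on (not the exact ComfyUI helper):

```python
# Sketch of the standard ancestral step split; with eta=0 the step
# degenerates to the deterministic update (sigma_up == 0).
def get_ancestral_step(sigma_from: float, sigma_to: float, eta: float = 1.0):
    if not eta:
        return sigma_to, 0.0
    sigma_up = min(sigma_to, eta * (sigma_to ** 2 * (sigma_from ** 2 - sigma_to ** 2) / sigma_from ** 2) ** 0.5)
    sigma_down = (sigma_to ** 2 - sigma_up ** 2) ** 0.5
    return sigma_down, sigma_up

print(get_ancestral_step(10.0, 5.0, eta=0.0))  # (5.0, 0.0): deterministic
print(get_ancestral_step(10.0, 5.0, eta=1.0))  # (2.5, ~4.33): noisy split
```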

View File

@ -22,7 +22,7 @@ def attention(q: Tensor, k: Tensor, v: Tensor, pe: Tensor, mask=None) -> Tensor:
def rope(pos: Tensor, dim: int, theta: int) -> Tensor:
assert dim % 2 == 0
if model_management.is_device_mps(pos.device) or model_management.is_intel_xpu():
if model_management.is_device_mps(pos.device) or model_management.is_intel_xpu() or model_management.is_directml_enabled():
device = torch.device("cpu")
else:
device = pos.device

View File

@ -301,7 +301,7 @@ class HunyuanVideo(nn.Module):
shape[i] = shape[i] // self.patch_size[i]
img = img.reshape([img.shape[0]] + shape + [self.out_channels] + self.patch_size)
img = img.permute(0, 4, 1, 5, 2, 6, 3, 7)
img = img.reshape(initial_shape)
img = img.reshape(initial_shape[0], self.out_channels, initial_shape[2], initial_shape[3], initial_shape[4])
return img
def forward(self, x, timestep, context, y, guidance=None, attention_mask=None, control=None, transformer_options={}, **kwargs):

comfy/ldm/lumina/model.py (new file, 622 lines)
View File

@ -0,0 +1,622 @@
# Code from: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py
from __future__ import annotations
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import comfy.ldm.common_dit
from comfy.ldm.modules.diffusionmodules.mmdit import TimestepEmbedder, RMSNorm
from comfy.ldm.modules.attention import optimized_attention_masked
from comfy.ldm.flux.layers import EmbedND
def modulate(x, scale):
return x * (1 + scale.unsqueeze(1))
#############################################################################
# Core NextDiT Model #
#############################################################################
class JointAttention(nn.Module):
"""Multi-head attention module."""
def __init__(
self,
dim: int,
n_heads: int,
n_kv_heads: Optional[int],
qk_norm: bool,
operation_settings={},
):
"""
Initialize the Attention module.
Args:
dim (int): Number of input dimensions.
n_heads (int): Number of heads.
n_kv_heads (Optional[int]): Number of kv heads, if using GQA.
"""
super().__init__()
self.n_kv_heads = n_heads if n_kv_heads is None else n_kv_heads
self.n_local_heads = n_heads
self.n_local_kv_heads = self.n_kv_heads
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = dim // n_heads
self.qkv = operation_settings.get("operations").Linear(
dim,
(n_heads + self.n_kv_heads + self.n_kv_heads) * self.head_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.out = operation_settings.get("operations").Linear(
n_heads * self.head_dim,
dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
if qk_norm:
self.q_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
self.k_norm = RMSNorm(self.head_dim, elementwise_affine=True, **operation_settings)
else:
self.q_norm = self.k_norm = nn.Identity()
@staticmethod
def apply_rotary_emb(
x_in: torch.Tensor,
freqs_cis: torch.Tensor,
) -> torch.Tensor:
"""
Apply rotary embeddings to input tensors using the given frequency
tensor.
This function applies rotary embeddings to the given query or key
tensor using the provided frequency tensor 'freqs_cis'. The input
tensor is viewed as pairs of components, rotated by the precomputed
frequencies, and returned as a real tensor with its original shape.
Args:
x_in (torch.Tensor): Query or Key tensor to apply rotary embeddings.
freqs_cis (torch.Tensor): Precomputed frequency tensor for complex
exponentials.
Returns:
torch.Tensor: The input tensor with rotary embeddings applied.
"""
t_ = x_in.reshape(*x_in.shape[:-1], -1, 1, 2)
t_out = freqs_cis[..., 0] * t_[..., 0] + freqs_cis[..., 1] * t_[..., 1]
return t_out.reshape(*x_in.shape)
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
freqs_cis: torch.Tensor,
) -> torch.Tensor:
"""
Args:
x: Input tensor of shape (bsz, seqlen, dim).
x_mask: Additive attention mask, or None.
freqs_cis: Precomputed rotary frequency tensor.
Returns:
The attention output, projected back to `dim`.
"""
bsz, seqlen, _ = x.shape
xq, xk, xv = torch.split(
self.qkv(x),
[
self.n_local_heads * self.head_dim,
self.n_local_kv_heads * self.head_dim,
self.n_local_kv_heads * self.head_dim,
],
dim=-1,
)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq = self.q_norm(xq)
xk = self.k_norm(xk)
xq = JointAttention.apply_rotary_emb(xq, freqs_cis=freqs_cis)
xk = JointAttention.apply_rotary_emb(xk, freqs_cis=freqs_cis)
n_rep = self.n_local_heads // self.n_local_kv_heads
if n_rep >= 1:
xk = xk.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
xv = xv.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True)
return self.out(output)
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
ffn_dim_multiplier: Optional[float],
operation_settings={},
):
"""
Initialize the FeedForward module.
Args:
dim (int): Input dimension.
hidden_dim (int): Hidden dimension of the feedforward layer.
multiple_of (int): Value to ensure hidden dimension is a multiple
of this value.
ffn_dim_multiplier (float, optional): Custom multiplier for hidden
dimension. Defaults to None.
"""
super().__init__()
# custom dim factor multiplier
if ffn_dim_multiplier is not None:
hidden_dim = int(ffn_dim_multiplier * hidden_dim)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = operation_settings.get("operations").Linear(
dim,
hidden_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.w2 = operation_settings.get("operations").Linear(
hidden_dim,
dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.w3 = operation_settings.get("operations").Linear(
dim,
hidden_dim,
bias=False,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
# @torch.compile
def _forward_silu_gating(self, x1, x3):
return F.silu(x1) * x3
def forward(self, x):
return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x)))
class JointTransformerBlock(nn.Module):
def __init__(
self,
layer_id: int,
dim: int,
n_heads: int,
n_kv_heads: int,
multiple_of: int,
ffn_dim_multiplier: float,
norm_eps: float,
qk_norm: bool,
modulation=True,
operation_settings={},
) -> None:
"""
Initialize a TransformerBlock.
Args:
layer_id (int): Identifier for the layer.
dim (int): Embedding dimension of the input features.
n_heads (int): Number of attention heads.
n_kv_heads (Optional[int]): Number of attention heads in key and
value features (if using GQA), or set to None for the same as
query.
multiple_of (int): Round the feedforward hidden dimension up to a multiple of this value.
ffn_dim_multiplier (float): Custom multiplier applied to the feedforward hidden dimension.
norm_eps (float): Epsilon used by the RMSNorm layers.
"""
super().__init__()
self.dim = dim
self.head_dim = dim // n_heads
self.attention = JointAttention(dim, n_heads, n_kv_heads, qk_norm, operation_settings=operation_settings)
self.feed_forward = FeedForward(
dim=dim,
hidden_dim=4 * dim,
multiple_of=multiple_of,
ffn_dim_multiplier=ffn_dim_multiplier,
operation_settings=operation_settings,
)
self.layer_id = layer_id
self.attention_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.ffn_norm1 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.attention_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.ffn_norm2 = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.modulation = modulation
if modulation:
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
operation_settings.get("operations").Linear(
min(dim, 1024),
4 * dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
def forward(
self,
x: torch.Tensor,
x_mask: torch.Tensor,
freqs_cis: torch.Tensor,
adaln_input: Optional[torch.Tensor]=None,
):
"""
Perform a forward pass through the TransformerBlock.
Args:
x (torch.Tensor): Input tensor.
freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
Returns:
torch.Tensor: Output tensor after applying attention and
feedforward layers.
"""
if self.modulation:
assert adaln_input is not None
scale_msa, gate_msa, scale_mlp, gate_mlp = self.adaLN_modulation(adaln_input).chunk(4, dim=1)
x = x + gate_msa.unsqueeze(1).tanh() * self.attention_norm2(
self.attention(
modulate(self.attention_norm1(x), scale_msa),
x_mask,
freqs_cis,
)
)
x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(
self.feed_forward(
modulate(self.ffn_norm1(x), scale_mlp),
)
)
else:
assert adaln_input is None
x = x + self.attention_norm2(
self.attention(
self.attention_norm1(x),
x_mask,
freqs_cis,
)
)
x = x + self.ffn_norm2(
self.feed_forward(
self.ffn_norm1(x),
)
)
return x
class FinalLayer(nn.Module):
"""
The final layer of NextDiT.
"""
def __init__(self, hidden_size, patch_size, out_channels, operation_settings={}):
super().__init__()
self.norm_final = operation_settings.get("operations").LayerNorm(
hidden_size,
elementwise_affine=False,
eps=1e-6,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.linear = operation_settings.get("operations").Linear(
hidden_size,
patch_size * patch_size * out_channels,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.adaLN_modulation = nn.Sequential(
nn.SiLU(),
operation_settings.get("operations").Linear(
min(hidden_size, 1024),
hidden_size,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
def forward(self, x, c):
scale = self.adaLN_modulation(c)
x = modulate(self.norm_final(x), scale)
x = self.linear(x)
return x
class NextDiT(nn.Module):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
patch_size: int = 2,
in_channels: int = 4,
dim: int = 4096,
n_layers: int = 32,
n_refiner_layers: int = 2,
n_heads: int = 32,
n_kv_heads: Optional[int] = None,
multiple_of: int = 256,
ffn_dim_multiplier: Optional[float] = None,
norm_eps: float = 1e-5,
qk_norm: bool = False,
cap_feat_dim: int = 5120,
axes_dims: List[int] = (16, 56, 56),
axes_lens: List[int] = (1, 512, 512),
image_model=None,
device=None,
dtype=None,
operations=None,
) -> None:
super().__init__()
self.dtype = dtype
operation_settings = {"operations": operations, "device": device, "dtype": dtype}
self.in_channels = in_channels
self.out_channels = in_channels
self.patch_size = patch_size
self.x_embedder = operation_settings.get("operations").Linear(
in_features=patch_size * patch_size * in_channels,
out_features=dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
)
self.noise_refiner = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
modulation=True,
operation_settings=operation_settings,
)
for layer_id in range(n_refiner_layers)
]
)
self.context_refiner = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
modulation=False,
operation_settings=operation_settings,
)
for layer_id in range(n_refiner_layers)
]
)
self.t_embedder = TimestepEmbedder(min(dim, 1024), **operation_settings)
self.cap_embedder = nn.Sequential(
RMSNorm(cap_feat_dim, eps=norm_eps, elementwise_affine=True, **operation_settings),
operation_settings.get("operations").Linear(
cap_feat_dim,
dim,
bias=True,
device=operation_settings.get("device"),
dtype=operation_settings.get("dtype"),
),
)
self.layers = nn.ModuleList(
[
JointTransformerBlock(
layer_id,
dim,
n_heads,
n_kv_heads,
multiple_of,
ffn_dim_multiplier,
norm_eps,
qk_norm,
operation_settings=operation_settings,
)
for layer_id in range(n_layers)
]
)
self.norm_final = RMSNorm(dim, eps=norm_eps, elementwise_affine=True, **operation_settings)
self.final_layer = FinalLayer(dim, patch_size, self.out_channels, operation_settings=operation_settings)
assert (dim // n_heads) == sum(axes_dims)
self.axes_dims = axes_dims
self.axes_lens = axes_lens
self.rope_embedder = EmbedND(dim=dim // n_heads, theta=10000.0, axes_dim=axes_dims)
self.dim = dim
self.n_heads = n_heads
def unpatchify(
self, x: torch.Tensor, img_size: List[Tuple[int, int]], cap_size: List[int], return_tensor=False
) -> List[torch.Tensor]:
"""
x: (N, T, patch_size**2 * C)
imgs: (N, H, W, C)
"""
pH = pW = self.patch_size
imgs = []
for i in range(x.size(0)):
H, W = img_size[i]
begin = cap_size[i]
end = begin + (H // pH) * (W // pW)
imgs.append(
x[i][begin:end]
.view(H // pH, W // pW, pH, pW, self.out_channels)
.permute(4, 0, 2, 1, 3)
.flatten(3, 4)
.flatten(1, 2)
)
if return_tensor:
imgs = torch.stack(imgs, dim=0)
return imgs
def patchify_and_embed(
self, x: List[torch.Tensor] | torch.Tensor, cap_feats: torch.Tensor, cap_mask: torch.Tensor, t: torch.Tensor, num_tokens
) -> Tuple[torch.Tensor, torch.Tensor, List[Tuple[int, int]], List[int], torch.Tensor]:
bsz = len(x)
pH = pW = self.patch_size
device = x[0].device
dtype = x[0].dtype
if cap_mask is not None:
l_effective_cap_len = cap_mask.sum(dim=1).tolist()
else:
l_effective_cap_len = [num_tokens] * bsz
if cap_mask is not None and not torch.is_floating_point(cap_mask):
cap_mask = (cap_mask - 1).to(dtype) * torch.finfo(dtype).max
img_sizes = [(img.size(1), img.size(2)) for img in x]
l_effective_img_len = [(H // pH) * (W // pW) for (H, W) in img_sizes]
max_seq_len = max(
(cap_len+img_len for cap_len, img_len in zip(l_effective_cap_len, l_effective_img_len))
)
max_cap_len = max(l_effective_cap_len)
max_img_len = max(l_effective_img_len)
position_ids = torch.zeros(bsz, max_seq_len, 3, dtype=torch.int32, device=device)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
H, W = img_sizes[i]
H_tokens, W_tokens = H // pH, W // pW
assert H_tokens * W_tokens == img_len
position_ids[i, :cap_len, 0] = torch.arange(cap_len, dtype=torch.int32, device=device)
position_ids[i, cap_len:cap_len+img_len, 0] = cap_len
row_ids = torch.arange(H_tokens, dtype=torch.int32, device=device).view(-1, 1).repeat(1, W_tokens).flatten()
col_ids = torch.arange(W_tokens, dtype=torch.int32, device=device).view(1, -1).repeat(H_tokens, 1).flatten()
position_ids[i, cap_len:cap_len+img_len, 1] = row_ids
position_ids[i, cap_len:cap_len+img_len, 2] = col_ids
freqs_cis = self.rope_embedder(position_ids).movedim(1, 2).to(dtype)
# build freqs_cis for cap and image individually
cap_freqs_cis_shape = list(freqs_cis.shape)
# cap_freqs_cis_shape[1] = max_cap_len
cap_freqs_cis_shape[1] = cap_feats.shape[1]
cap_freqs_cis = torch.zeros(*cap_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)
img_freqs_cis_shape = list(freqs_cis.shape)
img_freqs_cis_shape[1] = max_img_len
img_freqs_cis = torch.zeros(*img_freqs_cis_shape, device=device, dtype=freqs_cis.dtype)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
cap_freqs_cis[i, :cap_len] = freqs_cis[i, :cap_len]
img_freqs_cis[i, :img_len] = freqs_cis[i, cap_len:cap_len+img_len]
# refine context
for layer in self.context_refiner:
cap_feats = layer(cap_feats, cap_mask, cap_freqs_cis)
# refine image
flat_x = []
for i in range(bsz):
img = x[i]
C, H, W = img.size()
img = img.view(C, H // pH, pH, W // pW, pW).permute(1, 3, 2, 4, 0).flatten(2).flatten(0, 1)
flat_x.append(img)
x = flat_x
padded_img_embed = torch.zeros(bsz, max_img_len, x[0].shape[-1], device=device, dtype=x[0].dtype)
padded_img_mask = torch.zeros(bsz, max_img_len, dtype=dtype, device=device)
for i in range(bsz):
padded_img_embed[i, :l_effective_img_len[i]] = x[i]
padded_img_mask[i, l_effective_img_len[i]:] = -torch.finfo(dtype).max
padded_img_embed = self.x_embedder(padded_img_embed)
padded_img_mask = padded_img_mask.unsqueeze(1)
for layer in self.noise_refiner:
padded_img_embed = layer(padded_img_embed, padded_img_mask, img_freqs_cis, t)
if cap_mask is not None:
mask = torch.zeros(bsz, max_seq_len, dtype=dtype, device=device)
mask[:, :max_cap_len] = cap_mask[:, :max_cap_len]
else:
mask = None
padded_full_embed = torch.zeros(bsz, max_seq_len, self.dim, device=device, dtype=x[0].dtype)
for i in range(bsz):
cap_len = l_effective_cap_len[i]
img_len = l_effective_img_len[i]
padded_full_embed[i, :cap_len] = cap_feats[i, :cap_len]
padded_full_embed[i, cap_len:cap_len+img_len] = padded_img_embed[i, :img_len]
return padded_full_embed, mask, img_sizes, l_effective_cap_len, freqs_cis
# def forward(self, x, t, cap_feats, cap_mask):
def forward(self, x, timesteps, context, num_tokens, attention_mask=None, **kwargs):
t = 1.0 - timesteps
cap_feats = context
cap_mask = attention_mask
bs, c, h, w = x.shape
x = comfy.ldm.common_dit.pad_to_patch_size(x, (self.patch_size, self.patch_size))
"""
Forward pass of NextDiT.
timesteps: (N,) tensor of diffusion timesteps
context: (N, L, D) tensor of caption features
"""
t = self.t_embedder(t, dtype=x.dtype) # (N, D)
adaln_input = t
cap_feats = self.cap_embedder(cap_feats) # (N, L, D) # todo check if able to batchify w.o. redundant compute
x_is_tensor = isinstance(x, torch.Tensor)
x, mask, img_size, cap_size, freqs_cis = self.patchify_and_embed(x, cap_feats, cap_mask, t, num_tokens)
freqs_cis = freqs_cis.to(x.device)
for layer in self.layers:
x = layer(x, mask, freqs_cis, adaln_input)
x = self.final_layer(x, adaln_input)
x = self.unpatchify(x, img_size, cap_size, return_tensor=x_is_tensor)[:,:,:h,:w]
return -x
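A minimal instantiation sketch using the Lumina 2 hyperparameters that `detect_unet_config` emits later in this commit; the dtype, device, and `ops.disable_weight_init` choices are assumptions of this sketch:

```python
# Sketch: hyperparameters mirror the lumina2 branch of detect_unet_config;
# the operations/dtype/device choices here are assumptions.
import torch
import comfy.ops as ops

model = NextDiT(
    patch_size=2, in_channels=16, dim=2304, cap_feat_dim=2304,
    n_layers=26, n_heads=24, n_kv_heads=8, qk_norm=True,
    axes_dims=[32, 32, 32], axes_lens=[300, 512, 512],
    dtype=torch.bfloat16, device="cpu", operations=ops.disable_weight_init,
)
# dim // n_heads == 96 == sum(axes_dims), satisfying the constructor assert.
```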

View File

@ -30,11 +30,12 @@ FORCE_UPCAST_ATTENTION_DTYPE = model_management.force_upcast_attention_dtype()
logger = logging.getLogger(__name__)
def get_attn_precision(attn_precision):
def get_attn_precision(attn_precision, current_dtype):
if args.dont_upcast_attention:
return None
if FORCE_UPCAST_ATTENTION_DTYPE is not None:
return FORCE_UPCAST_ATTENTION_DTYPE
if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
return attn_precision
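`FORCE_UPCAST_ATTENTION_DTYPE` is now a dtype-to-dtype mapping rather than a single dtype (see the `{torch.float16: torch.float32}` return later in this diff), so only the affected input dtype is upcast. A standalone sketch of the lookup semantics:

```python
# Standalone sketch of the new mapping semantics (args handling omitted).
import torch

FORCE_UPCAST_ATTENTION_DTYPE = {torch.float16: torch.float32}

def get_attn_precision(attn_precision, current_dtype):
    if FORCE_UPCAST_ATTENTION_DTYPE is not None and current_dtype in FORCE_UPCAST_ATTENTION_DTYPE:
        return FORCE_UPCAST_ATTENTION_DTYPE[current_dtype]
    return attn_precision

print(get_attn_precision(None, torch.float16))   # torch.float32 (upcast)
print(get_attn_precision(None, torch.bfloat16))  # None (left alone)
```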
@ -52,17 +53,6 @@ def default(val, d):
return d
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# feedforward
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops):
@ -99,7 +89,7 @@ def Normalize(in_channels, dtype=None, device=None):
def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
attn_precision = get_attn_precision(attn_precision)
attn_precision = get_attn_precision(attn_precision, q.dtype)
if skip_reshape:
b, _, _, dim_head = q.shape
@ -168,7 +158,7 @@ def attention_basic(q, k, v, heads, mask=None, attn_precision=None, skip_reshape
def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
attn_precision = get_attn_precision(attn_precision)
attn_precision = get_attn_precision(attn_precision, query.dtype)
if skip_reshape:
b, _, _, dim_head = query.shape
@ -238,7 +228,7 @@ def attention_sub_quad(query, key, value, heads, mask=None, attn_precision=None,
def attention_split(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
attn_precision = get_attn_precision(attn_precision)
attn_precision = get_attn_precision(attn_precision, q.dtype)
if skip_reshape:
b, _, _, dim_head = q.shape
@ -430,6 +420,7 @@ def pytorch_style_decl(func):
:param func:
:return:
"""
@wraps(func)
def wrapper(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
if skip_reshape:
@ -487,12 +478,12 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
m = mask
if mask is not None:
if mask.shape[0] > 1:
m = mask[i : i + SDP_BATCH_LIMIT]
m = mask[i: i + SDP_BATCH_LIMIT]
out[i : i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention(
q[i : i + SDP_BATCH_LIMIT],
k[i : i + SDP_BATCH_LIMIT],
v[i : i + SDP_BATCH_LIMIT],
out[i: i + SDP_BATCH_LIMIT] = torch.nn.functional.scaled_dot_product_attention(
q[i: i + SDP_BATCH_LIMIT],
k[i: i + SDP_BATCH_LIMIT],
v[i: i + SDP_BATCH_LIMIT],
attn_mask=m,
dropout_p=0.0, is_causal=False
).transpose(1, 2).reshape(-1, q.shape[2], heads * dim_head)
@ -502,7 +493,7 @@ def attention_pytorch(q, k, v, heads, mask=None, attn_precision=None, skip_resha
def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False):
if skip_reshape:
b, _, _, dim_head = q.shape
tensor_layout="HND"
tensor_layout = "HND"
else:
b, _, dim_head = q.shape
dim_head //= heads
@ -510,7 +501,7 @@ def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=
lambda t: t.view(b, -1, heads, dim_head),
(q, k, v),
)
tensor_layout="NHD"
tensor_layout = "NHD"
if mask is not None:
# add a batch dimension if there isn't already one

View File

@ -323,7 +323,7 @@ class SelfAttention(nn.Module):
class RMSNorm(torch.nn.Module):
def __init__(
self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None
self, dim: int, elementwise_affine: bool = False, eps: float = 1e-6, device=None, dtype=None, **kwargs
):
"""
Initialize the RMSNorm normalization layer.

View File

@ -305,7 +305,7 @@ def vae_attention():
if model_management.xformers_enabled_vae():
logging.debug("Using xformers attention in VAE")
return xformers_attention
elif model_management.pytorch_attention_enabled():
elif model_management.pytorch_attention_enabled_vae():
logging.debug("Using pytorch attention in VAE")
return pytorch_attention
else:

View File

@ -309,7 +309,7 @@ def model_lora_keys_unet(model, key_map=None):
if k.endswith(".weight"):
key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
key_map["lora_unet_{}".format(key_lora)] = k
key_map["lora_prior_unet_{}".format(key_lora)] = k # cascade lora: TODO put lora key prefix in the model config
key_map["{}".format(k[:-len(".weight")])] = k # generic lora format without any weird key names
else:
key_map["{}".format(k)] = k # generic lora format for not .weight without any weird key names
@ -329,6 +329,13 @@ def model_lora_keys_unet(model, key_map=None):
diffusers_lora_key = diffusers_lora_key[:-2]
key_map[diffusers_lora_key] = unet_key
if isinstance(model, model_base.StableCascade_C):
for k in sdk:
if k.startswith("diffusion_model."):
if k.endswith(".weight"):
key_lora = k[len("diffusion_model."):-len(".weight")].replace(".", "_")
key_map["lora_prior_unet_{}".format(key_lora)] = k
if isinstance(model, model_base.SD3): # Diffusers lora SD3
diffusers_keys = utils.mmdit_to_diffusers(model.model_config.unet_config, output_prefix="diffusion_model.")
for k in diffusers_keys:

View File

@ -39,6 +39,7 @@ from .ldm.genmo.joint_model.asymm_models_joint import AsymmDiTJoint
from .ldm.hunyuan_video.model import HunyuanVideo as HunyuanVideoModel
from .ldm.hydit.models import HunYuanDiT
from .ldm.lightricks.model import LTXVModel
from .ldm.lumina.model import NextDiT
from .ldm.modules.diffusionmodules.mmdit import OpenAISignatureMMDITWrapper
from .ldm.modules.diffusionmodules.openaimodel import UNetModel, Timestep
from .ldm.modules.diffusionmodules.upscaling import ImageConcatWithNoiseAugmentation
@ -181,9 +182,6 @@ class BaseModel(torch.nn.Module):
def get_dtype(self):
return self.diffusion_model.dtype
def is_adm(self):
return self.adm_channels > 0
def encode_adm(self, **kwargs):
return None
@ -908,6 +906,15 @@ class HunyuanVideo(BaseModel):
if cross_attn is not None:
out['c_crossattn'] = conds.CONDRegular(cross_attn)
image = kwargs.get("concat_latent_image", None)
noise = kwargs.get("noise", None)
if image is not None:
padding_shape = (noise.shape[0], 16, noise.shape[2] - 1, noise.shape[3], noise.shape[4])
latent_padding = torch.zeros(padding_shape, device=noise.device, dtype=noise.dtype)
image_latents = torch.cat([image.to(noise), latent_padding], dim=2)
out['c_concat'] = conds.CONDNoiseShape(self.process_latent_in(image_latents))
guidance = kwargs.get("guidance", 6.0)
if guidance is not None:
out['guidance'] = conds.CONDRegular(torch.FloatTensor([guidance]))
@ -940,3 +947,19 @@ class CosmosVideo(BaseModel):
latent_image = latent_image + noise
latent_image = self.model_sampling.calculate_input(torch.tensor([sigma_noise_augmentation], device=latent_image.device, dtype=latent_image.dtype), latent_image)
return latent_image * ((sigma ** 2 + self.model_sampling.sigma_data ** 2) ** 0.5)
class Lumina2(BaseModel):
def __init__(self, model_config, model_type=ModelType.FLOW, device=None):
super().__init__(model_config, model_type, device=device, unet_model=NextDiT)
def extra_conds(self, **kwargs):
out = super().extra_conds(**kwargs)
attention_mask = kwargs.get("attention_mask", None)
if attention_mask is not None:
if torch.numel(attention_mask) != attention_mask.sum():
out['attention_mask'] = conds.CONDRegular(attention_mask)
out['num_tokens'] = conds.CONDConstant(max(1, torch.sum(attention_mask).item()))
cross_attn = kwargs.get("cross_attn", None)
if cross_attn is not None:
out['c_crossattn'] = conds.CONDRegular(cross_attn)
return out

View File

@ -140,7 +140,7 @@ def detect_unet_config(state_dict, key_prefix):
if '{}txt_in.individual_token_refiner.blocks.0.norm1.weight'.format(key_prefix) in state_dict_keys: #Hunyuan Video
dit_config = {}
dit_config["image_model"] = "hunyuan_video"
dit_config["in_channels"] = 16
dit_config["in_channels"] = state_dict['{}img_in.proj.weight'.format(key_prefix)].shape[1] #SkyReels img2video has 32 input channels
dit_config["patch_size"] = [1, 2, 2]
dit_config["out_channels"] = 16
dit_config["vec_in_dim"] = 768
@ -243,7 +243,7 @@ def detect_unet_config(state_dict, key_prefix):
dit_config["micro_condition"] = False
return dit_config
if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys:
if '{}blocks.block0.blocks.0.block.attn.to_q.0.weight'.format(key_prefix) in state_dict_keys: # Cosmos
dit_config = {}
dit_config["image_model"] = "cosmos"
dit_config["max_img_h"] = 240
@ -288,6 +288,21 @@ def detect_unet_config(state_dict, key_prefix):
dit_config["extra_per_block_abs_pos_emb_type"] = "learnable"
return dit_config
if '{}cap_embedder.1.weight'.format(key_prefix) in state_dict_keys: # Lumina 2
dit_config = {}
dit_config["image_model"] = "lumina2"
dit_config["patch_size"] = 2
dit_config["in_channels"] = 16
dit_config["dim"] = 2304
dit_config["cap_feat_dim"] = 2304
dit_config["n_layers"] = 26
dit_config["n_heads"] = 24
dit_config["n_kv_heads"] = 8
dit_config["qk_norm"] = True
dit_config["axes_dims"] = [32, 32, 32]
dit_config["axes_lens"] = [300, 512, 512]
return dit_config
if '{}input_blocks.0.0.weight'.format(key_prefix) not in state_dict_keys:
return None

View File

@ -72,7 +72,9 @@ xpu_available = False
torch_version = ""
try:
torch_version = torch.version.__version__
xpu_available = (int(torch_version[0]) < 2 or (int(torch_version[0]) == 2 and int(torch_version[2]) <= 4)) and torch.xpu.is_available()
temp = torch_version.split(".")
torch_version_numeric = (int(temp[0]), int(temp[1]))
xpu_available = (torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] <= 4)) and torch.xpu.is_available()
except:
pass
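The old parse indexed single characters of the version string, which silently misreads double-digit components; the split-based tuple handles versions like "2.10.0" and local suffixes. A quick standalone comparison:

```python
# Why character indexing was replaced: a double-digit minor version.
torch_version = "2.10.0+cu124"

old = (int(torch_version[0]), int(torch_version[2]))  # (2, 1) -- wrong
temp = torch_version.split(".")
new = (int(temp[0]), int(temp[1]))                    # (2, 10) -- correct

print(old, new)
```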
@ -259,7 +261,7 @@ def is_amd():
MIN_WEIGHT_MEMORY_RATIO = 0.4
if is_nvidia():
MIN_WEIGHT_MEMORY_RATIO = 0.1
MIN_WEIGHT_MEMORY_RATIO = 0.0
ENABLE_PYTORCH_ATTENTION = False
if args.use_pytorch_cross_attention:
@ -268,7 +270,7 @@ if args.use_pytorch_cross_attention:
try:
if is_nvidia() or is_amd():
if int(torch_version[0]) >= 2:
if torch_version_numeric[0] >= 2:
if ENABLE_PYTORCH_ATTENTION == False and args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
ENABLE_PYTORCH_ATTENTION = True
if is_intel_xpu() or is_ascend_npu():
@ -277,13 +279,32 @@ try:
except:
pass
try:
if is_amd():
arch = torch.cuda.get_device_properties(get_torch_device()).gcnArchName
logging.info("AMD arch: {}".format(arch))
if args.use_split_cross_attention == False and args.use_quad_cross_attention == False:
if torch_version_numeric[0] >= 2 and torch_version_numeric[1] >= 7: # works on 2.6 but doesn't actually seem to improve much
if any((a in arch) for a in ["gfx1100", "gfx1101"]): # TODO: more arches
ENABLE_PYTORCH_ATTENTION = True
except:
pass
if ENABLE_PYTORCH_ATTENTION:
torch.backends.cuda.enable_math_sdp(True)
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(True)
PRIORITIZE_FP16 = False # TODO: remove and replace with something that shows exactly which dtype is faster than the other
try:
if int(torch_version[0]) == 2 and int(torch_version[2]) >= 5:
if is_nvidia() and args.fast:
torch.backends.cuda.matmul.allow_fp16_accumulation = True
PRIORITIZE_FP16 = True # TODO: limit to cards where it actually boosts performance
except:
pass
try:
if torch_version_numeric[0] == 2 and torch_version_numeric[1] >= 5:
torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp(True) # pylint: disable=no-member
except:
logging.warning("Warning, could not set allow_fp16_bf16_reduction_math_sdp")
@ -629,7 +650,6 @@ def _load_models_gpu(models: Sequence[ModelManageable], memory_required: int = 0
current_loaded_models.insert(0, loaded_model)
logger.debug(f"Loaded {loaded_model}")
span = get_current_span()
span.set_attribute("models_to_load", list(map(str, models_to_load)))
span.set_attribute("models_freed", list(map(str, models_freed)))
@ -759,6 +779,10 @@ def unet_dtype(device=None, model_params=0, supported_dtypes=(torch.float16, tor
if model_params * 2 > free_model_memory:
return fp8_dtype
if PRIORITIZE_FP16:
if torch.float16 in supported_dtypes and should_use_fp16(device=device, model_params=model_params):
return torch.float16
for dt in supported_dtypes:
if dt == torch.float16 and should_use_fp16(device=device, model_params=model_params):
if torch.float16 in supported_dtypes:
@ -1037,6 +1061,12 @@ def pytorch_attention_enabled():
return ENABLE_PYTORCH_ATTENTION
def pytorch_attention_enabled_vae():
if is_amd():
return False # enabling pytorch attention on AMD currently causes crash when doing high res
return pytorch_attention_enabled()
def pytorch_attention_flash_attention():
global ENABLE_PYTORCH_ATTENTION
if ENABLE_PYTORCH_ATTENTION:
@ -1047,6 +1077,8 @@ def pytorch_attention_flash_attention():
return True
if is_ascend_npu():
return True
if is_amd():
return True # if you have pytorch attention enabled on AMD it probably supports at least mem efficient attention
return False
@ -1061,11 +1093,11 @@ def force_upcast_attention_dtype():
upcast = args.force_upcast_attention
macos_version = mac_version()
if macos_version is not None and ((14, 5) <= macos_version <= (15, 2)): # black image bug on recent versions of macOS
if macos_version is not None and ((14, 5) <= macos_version < (16,)): # black image bug on recent versions of macOS
upcast = True
if upcast:
return torch.float32
return {torch.float16: torch.float32}
else:
return None
@ -1139,21 +1171,27 @@ def is_device_cuda(device):
return is_device_type(device, 'cuda')
def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
global directml_device
def is_directml_enabled():
global directml_enabled
if directml_enabled:
return True
return False
def should_use_fp16(device=None, model_params=0, prioritize_performance=True, manual_cast=False):
if device is not None:
if is_device_cpu(device):
return False
if FORCE_FP16:
if args.force_fp16:
return True
if FORCE_FP32:
return False
if directml_device:
return False
if is_directml_enabled():
return True
if (device is not None and is_device_mps(device)) or mps_mode():
return True
@ -1234,6 +1272,16 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
if is_intel_xpu():
return True
if is_ascend_npu():
return True
if is_amd():
arch = torch.cuda.get_device_properties(device).gcnArchName
if any((a in arch) for a in ["gfx1030", "gfx1031", "gfx1010", "gfx1011", "gfx1012", "gfx906", "gfx900", "gfx803"]): # RDNA2 and older don't support bf16
if manual_cast:
return True
return False
try:
props_major = min(torch.cuda.get_device_properties(torch.device(f"cuda:{i}")).major for i in range(torch.cuda.device_count()))
if props_major >= 8:
@ -1247,7 +1295,7 @@ def should_use_bf16(device=None, model_params=0, prioritize_performance=True, ma
bf16_works = torch.cuda.is_bf16_supported()
if bf16_works or manual_cast:
if bf16_works and manual_cast:
free_model_memory = maximum_vram_for_weights(device)
if (not prioritize_performance) or model_params * 4 > free_model_memory:
return True
@ -1271,11 +1319,11 @@ def supports_fp8_compute(device=None):
if props.minor < 9:
return False
if int(torch_version[0]) < 2 or (int(torch_version[0]) == 2 and int(torch_version[2]) < 3):
if torch_version_numeric[0] < 2 or (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 3):
return False
if WINDOWS:
if (int(torch_version[0]) == 2 and int(torch_version[2]) < 4):
if (torch_version_numeric[0] == 2 and torch_version_numeric[1] < 4):
return False
return True

View File

@ -126,6 +126,16 @@ class ModelManageable(Protocol):
self.unpatch_model(self.offload_device, unpatch_weights=unpatch_all)
return self.model
def set_model_compute_dtype(self, dtype: torch.dtype):
pass
def add_weight_wrapper(self, name, function):
pass
@property
def force_cast_weights(self) -> bool:
return False
@dataclasses.dataclass
class MemoryMeasurements:

View File

@ -112,8 +112,28 @@ def wipe_lowvram_weight(m):
if hasattr(m, "prev_comfy_cast_weights"):
m.comfy_cast_weights = m.prev_comfy_cast_weights
del m.prev_comfy_cast_weights
m.weight_function = None
m.bias_function = None
if hasattr(m, "weight_function"):
m.weight_function = []
if hasattr(m, "bias_function"):
m.bias_function = []
def move_weight_functions(m, device):
if device is None:
return 0
memory = 0
if hasattr(m, "weight_function"):
for f in m.weight_function:
if hasattr(f, "move_to"):
memory += f.move_to(device=device)
if hasattr(m, "bias_function"):
for f in m.bias_function:
if hasattr(f, "move_to"):
memory += f.move_to(device=device)
return memory
class LowVramPatch:
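`move_weight_functions` relies on a duck-typed protocol: any entry in `weight_function`/`bias_function` that exposes `move_to(device=...)` is relocated and reports the bytes it occupies. A minimal sketch of a conforming wrapper (hypothetical class, not repository code):

```python
# Sketch of the protocol move_weight_functions expects from list entries.
import torch

class ScaleWrapper:
    """Hypothetical weight wrapper that carries its own tensor state."""
    def __init__(self, scale: torch.Tensor):
        self.scale = scale

    def __call__(self, weight: torch.Tensor) -> torch.Tensor:
        return weight * self.scale.to(weight.device)

    def move_to(self, device=None) -> int:
        # relocate owned state and report its size in bytes
        self.scale = self.scale.to(device)
        return self.scale.numel() * self.scale.element_size()

linear = torch.nn.Linear(4, 4)
linear.weight_function = [ScaleWrapper(torch.ones(()))]
linear.bias_function = []
# move_weight_functions(linear, device) would now move the scale tensor
# and count its bytes toward the module's memory footprint.
```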
@ -207,11 +227,13 @@ class ModelPatcher(ModelManageable):
self.backup = {}
self.object_patches = {}
self.object_patches_backup = {}
self.weight_wrapper_patches = {}
self._model_options: ModelOptions = {"transformer_options": {}}
self.model_size()
self.load_device = load_device
self.offload_device = offload_device
self.weight_inplace_update = weight_inplace_update
self._force_cast_weights = False
self._parent: ModelManageable | None = None
self.patches_uuid: uuid.UUID = uuid.uuid4()
self.ckpt_name = ckpt_name
@ -262,6 +284,14 @@ class ModelPatcher(ModelManageable):
def parent(self) -> Optional["ModelPatcher"]:
return self._parent
@property
def force_cast_weights(self) -> bool:
return self._force_cast_weights
@force_cast_weights.setter
def force_cast_weights(self, value:bool) -> None:
self._force_cast_weights = value
def lowvram_patch_counter(self):
return self._memory_measurements.lowvram_patch_counter
@ -284,11 +314,14 @@ class ModelPatcher(ModelManageable):
n.patches_uuid = self.patches_uuid
n.object_patches = self.object_patches.copy()
n.weight_wrapper_patches = self.weight_wrapper_patches.copy()
n._model_options = copy.deepcopy(self.model_options)
n.backup = self.backup
n.object_patches_backup = self.object_patches_backup
n._parent = self
n.force_cast_weights = self.force_cast_weights
# attachments
n.attachments = {}
for k in self.attachments:
@ -435,6 +468,16 @@ class ModelPatcher(ModelManageable):
def add_object_patch(self, name, obj):
self.object_patches[name] = obj
def set_model_compute_dtype(self, dtype):
self.add_object_patch("manual_cast_dtype", dtype)
if dtype is not None:
self.force_cast_weights = True
self.patches_uuid = uuid.uuid4() #TODO: optimize by preventing a full model reload for this
def add_weight_wrapper(self, name, function):
self.weight_wrapper_patches[name] = self.weight_wrapper_patches.get(name, []) + [function]
self.patches_uuid = uuid.uuid4()
def get_model_object(self, name: str) -> torch.nn.Module | typing.Any:
"""Retrieves a nested attribute from an object using dot notation considering
object patches.
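A hedged usage sketch of the two hooks added above; the patcher instance and weight key are illustrative assumptions, not calls taken from this repository:

```python
# Illustrative only: register a per-key weight wrapper and pin a compute dtype.
import torch

def scale_down(weight: torch.Tensor) -> torch.Tensor:
    return weight * 0.5

patcher.add_weight_wrapper("diffusion_model.out.weight", scale_down)  # hypothetical key
patcher.set_model_compute_dtype(torch.float16)  # also flips force_cast_weights on
```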
@ -617,6 +660,9 @@ class ModelPatcher(ModelManageable):
lowvram_weight = False
weight_key = "{}.weight".format(n)
bias_key = "{}.bias".format(n)
if not full_load and hasattr(m, "comfy_cast_weights"):
if mem_counter + module_mem >= lowvram_model_memory:
lowvram_weight = True
@ -624,34 +670,46 @@ class ModelPatcher(ModelManageable):
if hasattr(m, "prev_comfy_cast_weights"): # Already lowvramed
continue
weight_key = "{}.weight".format(n)
bias_key = "{}.bias".format(n)
cast_weight = self.force_cast_weights
if lowvram_weight:
if hasattr(m, "comfy_cast_weights"):
m.weight_function = []
m.bias_function = []
if weight_key in self.patches:
if force_patch_weights:
self.patch_weight_to_device(weight_key)
else:
m.weight_function = LowVramPatch(weight_key, self.patches)
m.weight_function = [LowVramPatch(weight_key, self.patches)]
patch_counter += 1
if bias_key in self.patches:
if force_patch_weights:
self.patch_weight_to_device(bias_key)
else:
m.bias_function = LowVramPatch(bias_key, self.patches)
m.bias_function = [LowVramPatch(bias_key, self.patches)]
patch_counter += 1
m.prev_comfy_cast_weights = m.comfy_cast_weights
m.comfy_cast_weights = True
cast_weight = True
else:
if hasattr(m, "comfy_cast_weights"):
if m.comfy_cast_weights:
wipe_lowvram_weight(m)
wipe_lowvram_weight(m)
if full_load or mem_counter + module_mem < lowvram_model_memory:
mem_counter += module_mem
load_completely.append((module_mem, n, m, params))
if cast_weight:
m.prev_comfy_cast_weights = m.comfy_cast_weights
m.comfy_cast_weights = True
if weight_key in self.weight_wrapper_patches:
m.weight_function.extend(self.weight_wrapper_patches[weight_key])
if bias_key in self.weight_wrapper_patches:
m.bias_function.extend(self.weight_wrapper_patches[bias_key])
mem_counter += move_weight_functions(m, device_to)
load_completely.sort(reverse=True)
for x in load_completely:
n = x[1]
@ -714,6 +772,7 @@ class ModelPatcher(ModelManageable):
self.unpatch_hooks()
if self._memory_measurements.model_lowvram:
for m in self.model.modules():
move_weight_functions(m, device_to)
wipe_lowvram_weight(m)
self._memory_measurements.model_lowvram = False
@ -780,15 +839,19 @@ class ModelPatcher(ModelManageable):
weight_key = "{}.weight".format(n)
bias_key = "{}.bias".format(n)
if move_weight:
cast_weight = self.force_cast_weights
m.to(device_to)
module_mem += move_weight_functions(m, device_to)
if lowvram_possible:
if weight_key in self.patches:
m.weight_function = LowVramPatch(weight_key, self.patches)
m.weight_function.append(LowVramPatch(weight_key, self.patches))
patch_counter += 1
if bias_key in self.patches:
m.bias_function = LowVramPatch(bias_key, self.patches)
m.bias_function.append(LowVramPatch(bias_key, self.patches))
patch_counter += 1
cast_weight = True
if cast_weight:
m.prev_comfy_cast_weights = m.comfy_cast_weights
m.comfy_cast_weights = True
m.comfy_patched_weights = False

View File

@ -46,6 +46,7 @@ class EPS(ModelSampling):
return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
if max_denoise:
noise = noise * torch.sqrt(1.0 + sigma ** 2.0)
else:
@ -79,9 +80,11 @@ class CONST(ModelSampling):
return model_input - model_output * sigma
def noise_scaling(self, sigma, noise, latent_image, max_denoise=False):
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))
return sigma * noise + (1.0 - sigma) * latent_image
def inverse_noise_scaling(self, sigma, latent):
sigma = sigma.view(sigma.shape[:1] + (1,) * (latent.ndim - 1))
return latent / (1.0 - sigma)
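The `sigma.view(...)` lines added above implement per-batch broadcasting; a small worked example:

```python
# A per-batch sigma of shape (B,) is viewed as (B, 1, 1, 1) so it
# broadcasts against (B, C, H, W) latents.
import torch

sigma = torch.tensor([0.25, 0.75])            # (B,)
noise = torch.randn(2, 4, 8, 8)               # (B, C, H, W)
sigma = sigma.view(sigma.shape[:1] + (1,) * (noise.ndim - 1))  # (B, 1, 1, 1)
assert (sigma * noise).shape == noise.shape
```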

View File

@ -3,7 +3,6 @@ import hashlib
from PIL import ImageFile, UnidentifiedImageError
from .cli_args import args
from .nodes.package_typing import CustomNode
def conditioning_set_values(conditioning, values: dict = None):
@ -50,7 +49,6 @@ def export_custom_nodes():
Must be called from within the module where the CustomNode classes are defined.
"""
import inspect
from abc import ABC
from .nodes.package_typing import CustomNode
# Get the calling module
@ -76,3 +74,13 @@ def export_custom_nodes():
del frame
return custom_nodes
def string_to_torch_dtype(string):
import torch
if string == "fp32":
return torch.float32
if string == "fp16":
return torch.float16
if string == "bf16":
return torch.bfloat16
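One usage note on the helper above, as a hedged sketch: strings other than the three recognized names fall through, so the function implicitly returns None and callers should supply their own default:

```python
# Illustrative only; "fp8" is an arbitrary unrecognized string.
dtype = string_to_torch_dtype("bf16")   # torch.bfloat16
missing = string_to_torch_dtype("fp8")  # None -> caller must pick a fallback
```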

View File

@ -946,7 +946,7 @@ class CLIPLoader:
@classmethod
def INPUT_TYPES(s):
return {"required": { "clip_name": (get_filename_list_with_downloadable("text_encoders", KNOWN_CLIP_MODELS),),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos"], ),
"type": (["stable_diffusion", "stable_cascade", "sd3", "stable_audio", "mochi", "ltxv", "pixart", "cosmos", "lumina2"], ),
},
"optional": {
"device": (["default", "cpu"], {"advanced": True}),
@ -956,7 +956,7 @@ class CLIPLoader:
CATEGORY = "advanced/loaders"
DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl"
DESCRIPTION = "[Recipes]\n\nstable_diffusion: clip-l\nstable_cascade: clip-g\nsd3: t5 / clip-g / clip-l\nstable_audio: t5\nmochi: t5\ncosmos: old t5 xxl\nlumina2: gemma 2 2B"
def load_clip(self, clip_name, type="stable_diffusion", device="default"):
clip_type = sd.CLIPType.STABLE_DIFFUSION
@ -974,6 +974,8 @@ class CLIPLoader:
clip_type = sd.CLIPType.PIXART
elif type == "cosmos":
clip_type = sd.CLIPType.COSMOS
elif type == "lumina2":
clip_type = comfy.sd.CLIPType.LUMINA2
else:
logging.warning(f"Unknown clip type argument passed: {type} for model {clip_name}")
@ -1101,10 +1103,11 @@ class StyleModelApply:
for t in conditioning:
(txt, keys) = t
keys = keys.copy()
if strength_type == "attn_bias" and strength != 1.0:
# even if the strength is 1.0 (i.e., no change), if there's already a mask, we have to add to it
if "attention_mask" in keys or (strength_type == "attn_bias" and strength != 1.0):
# math.log raises an error if the argument is zero
# torch.log returns -inf, which is what we want
attn_bias = torch.log(torch.Tensor([strength]))
attn_bias = torch.log(torch.Tensor([strength if strength_type == "attn_bias" else 1.0]))
# get the size of the mask image
mask_ref_size = keys.get("attention_mask_img_shape", (1, 1))
n_ref = mask_ref_size[0] * mask_ref_size[1]
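The comment pair above is worth a concrete check: `torch.log` tolerates a zero strength where `math.log` raises, which is why the bias is built with torch:

```python
# Worked example: zero strength becomes -inf (fully masked) instead of an error.
import math
import torch

attn_bias = torch.log(torch.Tensor([0.0]))  # tensor([-inf])
try:
    math.log(0.0)
except ValueError as e:
    print(e)  # math domain error
```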
@ -1793,6 +1796,36 @@ class LoadImageMask:
return True
class LoadImageOutput(LoadImage):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("COMBO", {
"image_upload": True,
"image_folder": "output",
"remote": {
"route": "/internal/files/output",
"refresh_button": True,
"control_after_refresh": "first",
},
}),
}
}
DESCRIPTION = "Load an image from the output folder. When the refresh button is clicked, the node will update the image list and automatically select the first image, allowing for easy iteration."
EXPERIMENTAL = True
FUNCTION = "load_image_output"
def load_image_output(self, image):
return self.load_image(f"{image} [output]")
@classmethod
def VALIDATE_INPUTS(s, image):
return True
class ImageScale:
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
crop_methods = ["disabled", "center"]
@ -1979,6 +2012,7 @@ NODE_CLASS_MAPPINGS = {
"PreviewImage": PreviewImage,
"LoadImage": LoadImage,
"LoadImageMask": LoadImageMask,
"LoadImageOutput": LoadImageOutput,
"ImageScale": ImageScale,
"ImageScaleBy": ImageScaleBy,
"ImageInvert": ImageInvert,
@ -2081,6 +2115,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"PreviewImage": "Preview Image",
"LoadImage": "Load Image",
"LoadImageMask": "Load Image (as Mask)",
"LoadImageOutput": "Load Image (from Outputs)",
"ImageScale": "Upscale Image",
"ImageScaleBy": "Upscale Image By",
"ImageUpscaleWithModel": "Upscale Image (using Model)",

View File

@ -44,15 +44,17 @@ def cast_bias_weight(s, input=None, dtype=None, device=None, bias_dtype=None):
bias = None
non_blocking = True if torch.jit.is_tracing() or torch.jit.is_scripting() else model_management.device_supports_non_blocking(device)
if s.bias is not None:
has_function = s.bias_function is not None
has_function = len(s.bias_function) > 0
bias = model_management.cast_to(s.bias, bias_dtype, device, non_blocking=non_blocking, copy=has_function)
if has_function:
bias = s.bias_function(bias)
for f in s.bias_function:
bias = f(bias)
has_function = s.weight_function is not None
has_function = len(s.weight_function) > 0
weight = model_management.cast_to(s.weight, dtype, device, non_blocking=non_blocking, copy=has_function)
if has_function:
weight = s.weight_function(weight)
for f in s.weight_function:
weight = f(weight)
return weight, bias
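With `weight_function` now a list, the cast weight is threaded through each entry in order. A standalone sketch of the chaining (illustrative lambdas):

```python
import torch

weight = torch.ones(2, 2)
weight_function = [lambda w: w * 2.0, lambda w: w + 1.0]
for f in weight_function:
    weight = f(weight)
assert torch.all(weight == 3.0)  # ones -> *2 -> +1, applied in list order
```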
@ -63,8 +65,8 @@ class SkipInit:
class CastWeightBiasOp:
comfy_cast_weights = False
weight_function = None
bias_function = None
weight_function = []
bias_function = []
class skip_init:
@ -118,7 +120,7 @@ class disable_weight_init:
return torch.nn.functional.linear(input, weight, bias)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -132,7 +134,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -146,7 +148,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -160,7 +162,7 @@ class disable_weight_init:
return self._conv_forward(input, weight, bias)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -174,7 +176,7 @@ class disable_weight_init:
return torch.nn.functional.group_norm(input, self.num_groups, weight, bias, self.eps)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -192,7 +194,7 @@ class disable_weight_init:
return torch.nn.functional.layer_norm(input, self.normalized_shape, weight, bias, self.eps)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -213,7 +215,7 @@ class disable_weight_init:
output_padding, self.groups, self.dilation)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -234,7 +236,7 @@ class disable_weight_init:
output_padding, self.groups, self.dilation)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@ -252,7 +254,7 @@ class disable_weight_init:
return torch.nn.functional.embedding(input, weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse).to(dtype=output_dtype)
def forward(self, *args, **kwargs):
if self.comfy_cast_weights:
if self.comfy_cast_weights or len(self.weight_function) > 0 or len(self.bias_function) > 0:
return self.forward_comfy_cast_weights(*args, **kwargs)
else:
if "out_dtype" in kwargs:

View File

@ -724,7 +724,8 @@ class Sampler:
KSAMPLER_NAMES = ["euler", "euler_cfg_pp", "euler_ancestral", "euler_ancestral_cfg_pp", "heun", "heunpp2", "dpm_2", "dpm_2_ancestral",
"lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_2s_ancestral_cfg_pp", "dpmpp_sde", "dpmpp_sde_gpu",
"dpmpp_2m", "dpmpp_2m_cfg_pp", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm",
"ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "gradient_estimation"]
"ipndm", "ipndm_v", "deis", "res_multistep", "res_multistep_cfg_pp", "res_multistep_ancestral", "res_multistep_ancestral_cfg_pp",
"gradient_estimation"]
class KSAMPLER(Sampler):

View File

@ -42,6 +42,7 @@ from .text_encoders import hunyuan_video
from .text_encoders import hydit
from .text_encoders import long_clipl
from .text_encoders import lt
from .text_encoders import lumina2
from .text_encoders import pixart_t5
from .text_encoders import sa_t5
from .text_encoders import sd2_clip
@ -676,6 +677,7 @@ class CLIPType(Enum):
HUNYUAN_VIDEO = 9
PIXART = 10
COSMOS = 11
LUMINA2 = 12
@dataclasses.dataclass
@ -704,6 +706,7 @@ class TEModel(Enum):
T5_BASE = 6
LLAMA3_8 = 7
T5_XXL_OLD = 8
GEMMA_2_2B = 9
def detect_te_model(sd):
@ -723,6 +726,8 @@ def detect_te_model(sd):
return TEModel.T5_XXL_OLD
if "encoder.block.0.layer.0.SelfAttention.k.weight" in sd:
return TEModel.T5_BASE
if 'model.layers.0.post_feedforward_layernorm.weight' in sd:
return TEModel.GEMMA_2_2B
if "model.layers.0.post_attention_layernorm.weight" in sd:
return TEModel.LLAMA3_8
return None
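Note the ordering in `detect_te_model`: a Gemma 2 state dict also contains `post_attention_layernorm` keys, so the Gemma 2 check must run before the generic Llama one. A minimal sketch of that ordering constraint:

```python
# Illustrative detector: swap the two checks and Gemma 2 is misdetected as Llama.
def sketch_detect(sd: dict) -> str:
    if "model.layers.0.post_feedforward_layernorm.weight" in sd:
        return "GEMMA_2_2B"
    if "model.layers.0.post_attention_layernorm.weight" in sd:
        return "LLAMA3_8"
    return "unknown"

gemma_sd = {
    "model.layers.0.post_feedforward_layernorm.weight": 0,
    "model.layers.0.post_attention_layernorm.weight": 0,
}
assert sketch_detect(gemma_sd) == "GEMMA_2_2B"
```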
@ -762,6 +767,7 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
if "text_projection" in clip_data[i]:
clip_data[i]["text_projection.weight"] = clip_data[i]["text_projection"].transpose(0, 1) # old models saved with the CLIPSave node
tokenizer_data = {}
clip_target = CLIPTarget()
clip_target.params = {}
if len(clip_data) == 1:
@ -801,6 +807,10 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
elif te_model == TEModel.T5_BASE:
clip_target.clip = sa_t5.SAT5Model
clip_target.tokenizer = sa_t5.SAT5Tokenizer
elif te_model == TEModel.GEMMA_2_2B:
clip_target.clip = lumina2.te(**llama_detect(clip_data))
clip_target.tokenizer = lumina2.LuminaTokenizer
tokenizer_data["spiece_model"] = clip_data[0].get("spiece_model", None)
else:
if clip_type == CLIPType.SD3:
clip_target.clip = sd3_clip.sd3_clip(clip_l=True, clip_g=False, t5=False)
@ -830,7 +840,6 @@ def load_text_encoder_state_dicts(state_dicts=[], embedding_directory=None, clip
clip_target.tokenizer = sd3_clip.SD3Tokenizer
parameters = 0
tokenizer_data = {}
for c in clip_data:
parameters += utils.calculate_parameters(c)
tokenizer_data, model_options = long_clipl.model_options_long_clip(c, tokenizer_data, model_options)

View File

@ -500,9 +500,11 @@ SDTokenizerT = TypeVar('SDTokenizerT', bound='SDTokenizer')
class SDTokenizer:
def __init__(self, tokenizer_path: torch.Tensor | bytes | bytearray | memoryview | str | Path | Traversable = None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data=None):
def __init__(self, tokenizer_path: torch.Tensor | bytes | bytearray | memoryview | str | Path | Traversable = None, max_length=77, pad_with_end=True, embedding_directory=None, embedding_size=768, embedding_key='clip_l', tokenizer_class=CLIPTokenizer, has_start_token=True, has_end_token=True, pad_to_max_length=True, min_length=None, pad_token=None, end_token=None, tokenizer_data=None, tokenizer_args=None):
if tokenizer_data is None:
tokenizer_data = dict()
if tokenizer_args is None:
tokenizer_args = dict()
if tokenizer_path is None:
tokenizer_path = files.get_package_as_path("comfy.sd1_tokenizer")
if isinstance(tokenizer_path, Path):
@ -515,7 +517,7 @@ class SDTokenizer:
tokenizer_path = get_package_as_path('comfy.sd1_tokenizer')
self.tokenizer_class = tokenizer_class
self.tokenizer_path = tokenizer_path
self.tokenizer: PreTrainedTokenizerBase | SPieceTokenizer = tokenizer_class.from_pretrained(tokenizer_path)
self.tokenizer: PreTrainedTokenizerBase | SPieceTokenizer = tokenizer_class.from_pretrained(tokenizer_path, **tokenizer_args)
self.max_length = max_length
self.min_length = min_length
self.end_token = None
@ -699,11 +701,15 @@ SD1TokenizerT = TypeVar("SD1TokenizerT", bound="SD1Tokenizer")
class SD1Tokenizer:
def __init__(self, embedding_directory=None, tokenizer_data=None, clip_name="l", tokenizer=SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data: dict=None, clip_name="l", tokenizer=SDTokenizer, name=None):
if tokenizer_data is None:
tokenizer_data = {}
self.clip_name = clip_name
self.clip = "clip_{}".format(self.clip_name)
if name is not None:
self.clip_name = name
self.clip = "{}".format(self.clip_name)
else:
self.clip_name = clip_name
self.clip = "clip_{}".format(self.clip_name)
tokenizer = tokenizer_data.get("{}_tokenizer_class".format(self.clip), tokenizer)
self.sd_tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
@ -729,7 +735,7 @@ class SD1Tokenizer:
return sd1_tokenizer
def state_dict(self):
return {}
return getattr(self, self.clip).state_dict()
class SD1CheckpointClipModel(SDClipModel):
def __init__(self, device="cpu", dtype=None, model_options=None, textmodel_json_config=None):

View File

@ -14,6 +14,7 @@ from .text_encoders import genmo
from .text_encoders import hunyuan_video
from .text_encoders import hydit
from .text_encoders import lt
from .text_encoders import lumina2
from .text_encoders import pixart_t5
from .text_encoders import sa_t5
from .text_encoders import sd2_clip
@ -63,7 +64,9 @@ class SD15(supported_models_base.BASE):
replace_prefix = {"clip_l.": "cond_stage_model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sd1_clip.SD1Tokenizer, sd1_clip.SD1ClipModel)
@ -108,7 +111,9 @@ class SD20(supported_models_base.BASE):
state_dict = diffusers_convert.convert_text_enc_state_dict_v20(state_dict)
return state_dict
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sd2_clip.SD2Tokenizer, sd2_clip.SD2ClipModel)
@ -173,7 +178,9 @@ class SDXLRefiner(supported_models_base.BASE):
state_dict_g = utils.state_dict_prefix_replace(state_dict_g, replace_prefix)
return state_dict_g
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sdxl_clip.SDXLTokenizer, sdxl_clip.SDXLRefinerClipModel)
@ -246,7 +253,9 @@ class SDXL(supported_models_base.BASE):
state_dict_g = utils.state_dict_prefix_replace(state_dict_g, replace_prefix)
return state_dict_g
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sdxl_clip.SDXLTokenizer, sdxl_clip.SDXLClipModel)
@ -322,7 +331,9 @@ class SVD_img2vid(supported_models_base.BASE):
out = model_base.SVD_img2vid(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return None
@ -390,7 +401,9 @@ class Stable_Zero123(supported_models_base.BASE):
out = model_base.Stable_Zero123(self, device=device, cc_projection_weight=state_dict["cc_projection.weight"], cc_projection_bias=state_dict["cc_projection.bias"])
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return None
@ -466,7 +479,9 @@ class Stable_Cascade_C(supported_models_base.BASE):
out = model_base.StableCascade_C(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sdxl_clip.StableCascadeTokenizer, sdxl_clip.StableCascadeClipModel)
@ -541,7 +556,9 @@ class SD3(supported_models_base.BASE):
out = model_base.SD3(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
clip_l = False
clip_g = False
t5 = False
@ -585,7 +602,9 @@ class StableAudio(supported_models_base.BASE):
replace_prefix = {"": "model.model."}
return utils.state_dict_prefix_replace(state_dict, replace_prefix)
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(sa_t5.SAT5Tokenizer, sa_t5.SAT5Model)
@ -609,7 +628,9 @@ class AuraFlow(supported_models_base.BASE):
out = model_base.AuraFlow(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(aura_t5.AuraT5Tokenizer, aura_t5.AuraT5Model)
@ -675,7 +696,9 @@ class HunyuanDiT(supported_models_base.BASE):
out = model_base.HunyuanDiT(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
return supported_models_base.ClipTarget(hydit.HyditTokenizer, hydit.HyditModel)
@ -715,7 +738,9 @@ class Flux(supported_models_base.BASE):
out = model_base.Flux(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
pref = self.text_encoder_key_prefix[0]
t5_detect = sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
return supported_models_base.ClipTarget(flux.FluxTokenizer, flux.flux_clip(**t5_detect))
@ -802,7 +827,9 @@ class LTXV(supported_models_base.BASE):
out = model_base.LTXV(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
pref = self.text_encoder_key_prefix[0]
t5_detect = sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
return supported_models_base.ClipTarget(lt.LTXVT5Tokenizer, lt.ltxv_te(**t5_detect))
@ -885,7 +912,9 @@ class CosmosT2V(supported_models_base.BASE):
out = model_base.CosmosVideo(self, device=device)
return out
def clip_target(self, state_dict={}):
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
pref = self.text_encoder_key_prefix[0]
t5_detect = sd3_clip.t5_xxl_detect(state_dict, "{}t5xxl.transformer.".format(pref))
return supported_models_base.ClipTarget(cosmos.CosmosT5Tokenizer, cosmos.te(**t5_detect))
@ -902,6 +931,38 @@ class CosmosI2V(CosmosT2V):
return out
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V]
class Lumina2(supported_models_base.BASE):
unet_config = {
"image_model": "lumina2",
}
sampling_settings = {
"multiplier": 1.0,
"shift": 6.0,
}
memory_usage_factor = 1.2
unet_extra_config = {}
latent_format = latent_formats.Flux
supported_inference_dtypes = [torch.bfloat16, torch.float32]
vae_key_prefix = ["vae."]
text_encoder_key_prefix = ["text_encoders."]
def get_model(self, state_dict, prefix="", device=None):
out = model_base.Lumina2(self, device=device)
return out
def clip_target(self, state_dict=None):
if state_dict is None:
state_dict = {}
pref = self.text_encoder_key_prefix[0]
hunyuan_detect = hunyuan_video.llama_detect(state_dict, "{}gemma2_2b.transformer.".format(pref))
return supported_models_base.ClipTarget(lumina2.LuminaTokenizer, lumina2.te(**hunyuan_detect))
models = [Stable_Zero123, SD15_instructpix2pix, SD15, SD20, SD21UnclipL, SD21UnclipH, SDXL_instructpix2pix, SDXLRefiner, SDXL, SSD1B, KOALA_700M, KOALA_1B, Segmind_Vega, SD_X4Upscaler, Stable_Cascade_C, Stable_Cascade_B, SV3D_u, SV3D_p, SD3, StableAudio, AuraFlow, PixArtAlpha, PixArtSigma, HunyuanDiT, HunyuanDiT1, FluxInpaint, Flux, FluxSchnell, GenmoMochi, LTXV, HunyuanVideo, CosmosT2V, CosmosI2V, Lumina2]
models += [SVD_img2vid]

View File

@ -120,7 +120,7 @@ class BertModel_(torch.nn.Module):
mask = None
if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
x, i = self.encoder(x, mask, intermediate_output)
return x, i
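The switch from float("-inf") to -torch.finfo(x.dtype).max matters for fully masked rows: an all-(-inf) row turns into NaN under softmax, while the finite minimum degrades gracefully to a uniform distribution. A worked check:

```python
import torch

row_inf = torch.full((3,), float("-inf"))
row_max = torch.full((3,), -torch.finfo(torch.float32).max)
print(torch.softmax(row_inf, dim=-1))  # tensor([nan, nan, nan])
print(torch.softmax(row_max, dim=-1))  # tensor([0.3333, 0.3333, 0.3333])
```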

View File

@ -1,9 +1,9 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Any
import torch
import torch.nn as nn
from ..ldm.common_dit import rms_norm
from ..ldm.modules.attention import optimized_attention_for_device
@ -19,21 +19,48 @@ class Llama2Config:
max_position_embeddings: int = 8192
rms_norm_eps: float = 1e-5
rope_theta: float = 500000.0
transformer_type: str = "llama"
head_dim = 128
rms_norm_add = False
mlp_activation = "silu"
@dataclass
class Gemma2_2B_Config:
vocab_size: int = 256000
hidden_size: int = 2304
intermediate_size: int = 9216
num_hidden_layers: int = 26
num_attention_heads: int = 8
num_key_value_heads: int = 4
max_position_embeddings: int = 8192
rms_norm_eps: float = 1e-6
rope_theta: float = 10000.0
transformer_type: str = "gemma2"
head_dim = 256
rms_norm_add = True
mlp_activation = "gelu_pytorch_tanh"
class RMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-5, device=None, dtype=None):
def __init__(self, dim: int, eps: float = 1e-5, add=False, device=None, dtype=None):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.empty(dim, device=device, dtype=dtype))
self.add = add
def forward(self, x: torch.Tensor):
return rms_norm(x, self.weight, self.eps)
w = self.weight
if self.add:
w = w + 1.0
return rms_norm(x, w, self.eps)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
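The `add` flag on RMSNorm above encodes Gemma's convention of storing norm weights as an offset from one. A minimal reference implementation under that assumption (the actual `rms_norm` kernel lives in `ldm.common_dit`):

```python
import torch

def rms_norm_ref(x: torch.Tensor, weight: torch.Tensor, eps: float, add: bool):
    scale = weight + 1.0 if add else weight      # Gemma 2: effective scale is w + 1
    normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps)
    return normed * scale

x = torch.randn(2, 8)
w = torch.zeros(8)                    # a zero Gemma weight acts as identity scale
out = rms_norm_ref(x, w, 1e-6, add=True)
assert out.shape == x.shape
```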
@ -66,23 +93,24 @@ class Attention(nn.Module):
self.num_heads = config.num_attention_heads
self.num_kv_heads = config.num_key_value_heads
self.hidden_size = config.hidden_size
self.head_dim = self.hidden_size // self.num_heads
self.head_dim = config.head_dim
self.inner_size = self.num_heads * self.head_dim
ops = ops or nn
self.q_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
self.q_proj = ops.Linear(config.hidden_size, self.inner_size, bias=False, device=device, dtype=dtype)
self.k_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.v_proj = ops.Linear(config.hidden_size, self.num_kv_heads * self.head_dim, bias=False, device=device, dtype=dtype)
self.o_proj = ops.Linear(config.hidden_size, config.hidden_size, bias=False, device=device, dtype=dtype)
self.o_proj = ops.Linear(self.inner_size, config.hidden_size, bias=False, device=device, dtype=dtype)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
):
batch_size, seq_length, _ = hidden_states.shape
xq = self.q_proj(hidden_states)
xk = self.k_proj(hidden_states)
xv = self.v_proj(hidden_states)
@ -99,6 +127,7 @@ class Attention(nn.Module):
output = optimized_attention(xq, xk, xv, self.num_heads, mask=attention_mask, skip_reshape=True)
return self.o_proj(output)
class MLP(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
@ -106,9 +135,14 @@ class MLP(nn.Module):
self.gate_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.up_proj = ops.Linear(config.hidden_size, config.intermediate_size, bias=False, device=device, dtype=dtype)
self.down_proj = ops.Linear(config.intermediate_size, config.hidden_size, bias=False, device=device, dtype=dtype)
if config.mlp_activation == "silu":
self.activation = torch.nn.functional.silu
elif config.mlp_activation == "gelu_pytorch_tanh":
self.activation = lambda a: torch.nn.functional.gelu(a, approximate="tanh")
def forward(self, x):
return self.down_proj(F.silu(self.gate_proj(x)) * self.up_proj(x))
return self.down_proj(self.activation(self.gate_proj(x)) * self.up_proj(x))
class TransformerBlock(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
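The "gelu_pytorch_tanh" branch above maps to PyTorch's tanh-approximate GELU, i.e. 0.5 * a * (1 + tanh(sqrt(2/pi) * (a + 0.044715 * a^3))). A quick check that the lambda matches the closed form:

```python
import math
import torch

a = torch.linspace(-2.0, 2.0, 5)
ref = 0.5 * a * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (a + 0.044715 * a.pow(3))))
assert torch.allclose(torch.nn.functional.gelu(a, approximate="tanh"), ref, atol=1e-6)
```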
@ -119,11 +153,11 @@ class TransformerBlock(nn.Module):
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
def forward(
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
):
# Self Attention
residual = x
@ -144,6 +178,47 @@ class TransformerBlock(nn.Module):
return x
class TransformerBlockGemma2(nn.Module):
def __init__(self, config: Llama2Config, device=None, dtype=None, ops: Any = None):
super().__init__()
self.self_attn = Attention(config, device=device, dtype=dtype, ops=ops)
self.mlp = MLP(config, device=device, dtype=dtype, ops=ops)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.pre_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
self.post_feedforward_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
def forward(
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
freqs_cis: Optional[torch.Tensor] = None,
optimized_attention=None,
):
# Self Attention
residual = x
x = self.input_layernorm(x)
x = self.self_attn(
hidden_states=x,
attention_mask=attention_mask,
freqs_cis=freqs_cis,
optimized_attention=optimized_attention,
)
x = self.post_attention_layernorm(x)
x = residual + x
# MLP
residual = x
x = self.pre_feedforward_layernorm(x)
x = self.mlp(x)
x = self.post_feedforward_layernorm(x)
x = residual + x
return x
class Llama2_(nn.Module):
def __init__(self, config, device=None, dtype=None, ops=None):
super().__init__()
@ -156,17 +231,27 @@ class Llama2_(nn.Module):
device=device,
dtype=dtype
)
if self.config.transformer_type == "gemma2":
transformer = TransformerBlockGemma2
self.normalize_in = True
else:
transformer = TransformerBlock
self.normalize_in = False
self.layers = nn.ModuleList([
TransformerBlock(config, device=device, dtype=dtype, ops=ops)
transformer(config, device=device, dtype=dtype, ops=ops)
for _ in range(config.num_hidden_layers)
])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, device=device, dtype=dtype)
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps, add=config.rms_norm_add, device=device, dtype=dtype)
# self.lm_head = ops.Linear(config.hidden_size, config.vocab_size, bias=False, device=device, dtype=dtype)
def forward(self, x, attention_mask=None, intermediate_output=None, final_layer_norm_intermediate=True, dtype=None):
x = self.embed_tokens(x, out_dtype=dtype)
freqs_cis = precompute_freqs_cis(self.config.hidden_size // self.config.num_attention_heads,
if self.normalize_in:
x *= self.config.hidden_size ** 0.5
freqs_cis = precompute_freqs_cis(self.config.head_dim,
x.shape[1],
self.config.rope_theta,
device=x.device)
@ -205,15 +290,7 @@ class Llama2_(nn.Module):
return x, intermediate
class Llama2(torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Llama2Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
class BaseLlama:
def get_input_embeddings(self):
return self.model.embed_tokens
@ -222,3 +299,23 @@ class Llama2(torch.nn.Module):
def forward(self, input_ids, *args, **kwargs):
return self.model(input_ids, *args, **kwargs)
class Llama2(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Llama2Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype
class Gemma2_2B(BaseLlama, torch.nn.Module):
def __init__(self, config_dict, dtype, device, operations):
super().__init__()
config = Gemma2_2B_Config(**config_dict)
self.num_layers = config.num_hidden_layers
self.model = Llama2_(config, device=device, dtype=dtype, ops=operations)
self.dtype = dtype

View File

@ -0,0 +1,44 @@
from comfy import sd1_clip
from .spiece_tokenizer import SPieceTokenizer
import comfy.text_encoders.llama
class Gemma2BTokenizer(sd1_clip.SDTokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
tokenizer = tokenizer_data.get("spiece_model", None)
super().__init__(tokenizer, pad_with_end=False, embedding_size=2304, embedding_key='gemma2_2b', tokenizer_class=SPieceTokenizer, has_end_token=False, pad_to_max_length=False, max_length=99999999, min_length=1, tokenizer_args={"add_bos": True, "add_eos": False})
def state_dict(self):
return {"spiece_model": self.tokenizer.serialize_model()}
class LuminaTokenizer(sd1_clip.SD1Tokenizer):
def __init__(self, embedding_directory=None, tokenizer_data={}):
super().__init__(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data, name="gemma2_2b", tokenizer=Gemma2BTokenizer)
class Gemma2_2BModel(sd1_clip.SDClipModel):
def __init__(self, device="cpu", layer="hidden", layer_idx=-2, dtype=None, attention_mask=True, model_options={}):
llama_scaled_fp8 = model_options.get("llama_scaled_fp8", None)
if llama_scaled_fp8 is not None:
model_options = model_options.copy()
model_options["scaled_fp8"] = llama_scaled_fp8
super().__init__(device=device, layer=layer, layer_idx=layer_idx, textmodel_json_config={}, dtype=dtype, special_tokens={"start": 2, "pad": 0}, layer_norm_hidden_state=False, model_class=comfy.text_encoders.llama.Gemma2_2B, enable_attention_masks=attention_mask, return_attention_masks=attention_mask, model_options=model_options)
class LuminaModel(sd1_clip.SD1ClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
super().__init__(device=device, dtype=dtype, name="gemma2_2b", clip_model=Gemma2_2BModel, model_options=model_options)
def te(dtype_llama=None, llama_scaled_fp8=None):
class LuminaTEModel_(LuminaModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
if llama_scaled_fp8 is not None and "llama_scaled_fp8" not in model_options:
model_options = model_options.copy()
model_options["llama_scaled_fp8"] = llama_scaled_fp8
if dtype_llama is not None:
dtype = dtype_llama
super().__init__(device=device, dtype=dtype, model_options=model_options)
return LuminaTEModel_
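`te()` follows the same factory pattern as the other text-encoder modules: it closes over detected dtype/fp8 options and hands the loader a pre-configured class. A standalone sketch of the pattern (generic names, not the ComfyUI classes):

```python
class BaseTE:
    def __init__(self, device="cpu", dtype=None, model_options=None):
        self.device, self.dtype = device, dtype
        self.model_options = model_options or {}

def make_te(dtype_override=None, scaled_fp8=None):
    class ConfiguredTE(BaseTE):
        def __init__(self, device="cpu", dtype=None, model_options=None):
            model_options = dict(model_options or {})
            if scaled_fp8 is not None:
                model_options.setdefault("scaled_fp8", scaled_fp8)
            super().__init__(device, dtype_override or dtype, model_options)
    return ConfiguredTE

TE = make_te(dtype_override="bf16")
assert TE().dtype == "bf16"   # the closure baked the detected dtype in
```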

View File

@ -1,26 +1,31 @@
import copy
from pathlib import Path
import sentencepiece
import torch
class SPieceTokenizer:
add_eos = True
@staticmethod
def from_pretrained(path):
return SPieceTokenizer(path)
def from_pretrained(path, **kwargs):
return SPieceTokenizer(path, **kwargs)
def __init__(self, tokenizer_path):
def __init__(self, tokenizer_path: bytes | str | Path, add_bos=False, add_eos=True):
self.add_bos = add_bos
self.add_eos = add_eos
if torch.is_tensor(tokenizer_path):
tokenizer_path = tokenizer_path.numpy().tobytes()
construction_args = {}
construction_args = {
'add_bos': self.add_bos,
'add_eos': self.add_eos
}
if isinstance(tokenizer_path, bytes):
construction_args["model_proto"] = tokenizer_path
else:
construction_args["model_file"] = tokenizer_path
self.tokenizer = sentencepiece.SentencePieceProcessor(add_eos=SPieceTokenizer.add_eos, **construction_args) # pylint: disable=unexpected-keyword-arg
self.tokenizer = sentencepiece.SentencePieceProcessor(**construction_args) # pylint: disable=unexpected-keyword-arg
self.end = self.tokenizer.eos_id()
self.eos_token_id = self.end
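A hedged usage sketch of the new constructor flags; the model path is a placeholder and the exact id layout depends on the actual SentencePiece model:

```python
# Illustrative only: the Gemma tokenizer above asks for add_bos=True,
# add_eos=False, so encoded ids begin with BOS and carry no trailing EOS.
tok = SPieceTokenizer("tokenizer.model", add_bos=True, add_eos=False)  # placeholder path
ids = tok.tokenizer.encode("hello")  # [bos_id, ...] with no eos appended
```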
@ -41,4 +46,3 @@ class SPieceTokenizer:
def clone(self):
return copy.copy(self)

View File

@ -214,7 +214,7 @@ class T5Stack(torch.nn.Module):
mask = None
if attention_mask is not None:
mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], 1, -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
mask = mask.masked_fill(mask.to(torch.bool), -torch.finfo(x.dtype).max)
intermediate = None
optimized_attention = optimized_attention_for_device(x.device, mask=attention_mask is not None, small_input=True)

View File

@ -97,7 +97,7 @@ def load_torch_file(ckpt: str, safe_load=False, device=None):
if "HeaderTooLarge" in message:
raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt or invalid. Make sure this is actually a safetensors file and not a ckpt or pt or other filetype.".format(message, ckpt))
if "MetadataIncompleteBuffer" in message:
raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is incomplete. Check the file size and make sure you have copied/downloaded it correctly.".format(message, ckpt))
raise ValueError("{}\n\nFile path: {}\n\nThe safetensors file is corrupt/incomplete. Check the file size and make sure you have copied/downloaded it correctly.".format(message, ckpt))
raise e
elif ckpt.lower().endswith("index.json"):
# from accelerate

View File

@ -1,4 +1,8 @@
<<<<<<<< HEAD:comfy/web/assets/BaseViewTemplate-DDUNNAbV.js
import { d as defineComponent, U as ref, p as onMounted, b4 as isElectron, W as nextTick, b5 as electronAPI, o as openBlock, f as createElementBlock, i as withDirectives, v as vShow, j as unref, b6 as isNativeWindow, m as createBaseVNode, A as renderSlot, ai as normalizeClass } from "./index-BsGgXmrT.js";
========
import { d as defineComponent, T as ref, p as onMounted, b8 as isElectron, V as nextTick, b9 as electronAPI, o as openBlock, f as createElementBlock, i as withDirectives, v as vShow, j as unref, ba as isNativeWindow, m as createBaseVNode, A as renderSlot, aj as normalizeClass } from "./index-Bv0b06LE.js";
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/BaseViewTemplate-BTbuZf5t.js
const _hoisted_1 = { class: "flex-grow w-full flex items-center justify-center overflow-auto" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "BaseViewTemplate",
@ -27,7 +31,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", {
class: normalizeClass(["font-sans w-screen h-screen flex flex-col pointer-events-auto", [
class: normalizeClass(["font-sans w-screen h-screen flex flex-col", [
props.dark ? "text-neutral-300 bg-neutral-900 dark-theme" : "text-neutral-900 bg-neutral-300"
]])
}, [
@ -48,4 +52,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as _
};
<<<<<<<< HEAD:comfy/web/assets/BaseViewTemplate-DDUNNAbV.js
//# sourceMappingURL=BaseViewTemplate-DDUNNAbV.js.map
========
//# sourceMappingURL=BaseViewTemplate-BTbuZf5t.js.map
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/BaseViewTemplate-BTbuZf5t.js

comfy/web/assets/DesktopStartView-D9r53Bue.js generated vendored Normal file
View File

@ -0,0 +1,19 @@
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, k as createVNode, j as unref, bE as script } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "DesktopStartView",
setup(__props) {
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
default: withCtx(() => [
createVNode(unref(script), { class: "m-8 w-48 h-48" })
]),
_: 1
});
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=DesktopStartView-D9r53Bue.js.map

View File

@ -1,22 +0,0 @@
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, k as createVNode, j as unref, bs as script } from "./index-BsGgXmrT.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-DDUNNAbV.js";
const _hoisted_1 = { class: "max-w-screen-sm w-screen p-8" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "DesktopStartView",
setup(__props) {
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$1, { dark: "" }, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createVNode(unref(script), { mode: "indeterminate" })
])
]),
_: 1
});
};
}
});
export {
_sfc_main as default
};
//# sourceMappingURL=DesktopStartView-elroCqfp.js.map

comfy/web/assets/DesktopUpdateView-C-R0415K.js generated vendored Normal file
View File

@ -0,0 +1,58 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, T as ref, d8 as onUnmounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, j as unref, bg as t, k as createVNode, bE as script, l as script$1, b9 as electronAPI, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { s as script$2 } from "./index-A_bXPJCN.js";
import { _ as _sfc_main$1 } from "./TerminalOutputDrawer-CKr7Br7O.js";
import { _ as _sfc_main$2 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1 = { class: "h-screen w-screen grid items-center justify-around overflow-y-auto" };
const _hoisted_2 = { class: "relative m-8 text-center" };
const _hoisted_3 = { class: "download-bg pi-download text-4xl font-bold" };
const _hoisted_4 = { class: "m-8" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "DesktopUpdateView",
setup(__props) {
const electron = electronAPI();
const terminalVisible = ref(false);
const toggleConsoleDrawer = /* @__PURE__ */ __name(() => {
terminalVisible.value = !terminalVisible.value;
}, "toggleConsoleDrawer");
onUnmounted(() => electron.Validation.dispose());
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$2, { dark: "" }, {
default: withCtx(() => [
createBaseVNode("div", _hoisted_1, [
createBaseVNode("div", _hoisted_2, [
createBaseVNode("h1", _hoisted_3, toDisplayString(unref(t)("desktopUpdate.title")), 1),
createBaseVNode("div", _hoisted_4, [
createBaseVNode("span", null, toDisplayString(unref(t)("desktopUpdate.description")), 1)
]),
createVNode(unref(script), { class: "m-8 w-48 h-48" }),
createVNode(unref(script$1), {
style: { "transform": "translateX(-50%)" },
class: "fixed bottom-0 left-1/2 my-8",
label: unref(t)("maintenance.consoleLogs"),
icon: "pi pi-desktop",
"icon-pos": "left",
severity: "secondary",
onClick: toggleConsoleDrawer
}, null, 8, ["label"]),
createVNode(_sfc_main$1, {
modelValue: terminalVisible.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => terminalVisible.value = $event),
header: unref(t)("g.terminal"),
"default-message": unref(t)("desktopUpdate.terminalDefaultMessage")
}, null, 8, ["modelValue", "header", "default-message"])
])
]),
createVNode(unref(script$2))
]),
_: 1
});
};
}
});
const DesktopUpdateView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-8d77828d"]]);
export {
DesktopUpdateView as default
};
//# sourceMappingURL=DesktopUpdateView-C-R0415K.js.map

comfy/web/assets/DesktopUpdateView-CxchaIvw.css generated vendored Normal file
View File

@ -0,0 +1,20 @@
.download-bg[data-v-8d77828d]::before {
position: absolute;
margin: 0px;
color: var(--p-text-muted-color);
font-family: 'primeicons';
top: -2rem;
right: 2rem;
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
line-height: 1;
display: inline-block;
-webkit-font-smoothing: antialiased;
opacity: 0.02;
font-size: min(14rem, 90vw);
z-index: 0
}

View File

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
<<<<<<<< HEAD:comfy/web/assets/DownloadGitView-BFcFCk37.js
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, be as useRouter } from "./index-BsGgXmrT.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-DDUNNAbV.js";
========
import { d as defineComponent, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, bi as useRouter } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/DownloadGitView-PWqK5ke4.js
const _hoisted_1 = { class: "max-w-screen-sm flex flex-col gap-8 p-8 bg-[url('/assets/images/Git-Logo-White.svg')] bg-no-repeat bg-right-top bg-origin-padding" };
const _hoisted_2 = { class: "mt-24 text-4xl font-bold text-red-500" };
const _hoisted_3 = { class: "space-y-4" };
@ -55,4 +60,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
<<<<<<<< HEAD:comfy/web/assets/DownloadGitView-BFcFCk37.js
//# sourceMappingURL=DownloadGitView-BFcFCk37.js.map
========
//# sourceMappingURL=DownloadGitView-PWqK5ke4.js.map
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/DownloadGitView-PWqK5ke4.js

View File

@ -1,8 +1,14 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
<<<<<<<< HEAD:comfy/web/assets/ExtensionPanel-BPpLOa_B.js
import { d as defineComponent, U as ref, df as FilterMatchMode, dk as useExtensionStore, a as useSettingStore, p as onMounted, c as computed, o as openBlock, y as createBlock, z as withCtx, k as createVNode, dg as SearchBox, j as unref, bj as script, m as createBaseVNode, f as createElementBlock, D as renderList, E as toDisplayString, a7 as createTextVNode, F as Fragment, l as script$1, B as createCommentVNode, a4 as script$3, ax as script$4, bn as script$5, dh as _sfc_main$1 } from "./index-BsGgXmrT.js";
import { g as script$2, h as script$6 } from "./index-Br6dw1F6.js";
import "./index-COyiXDAn.js";
========
import { d as defineComponent, T as ref, dx as FilterMatchMode, dC as useExtensionStore, a as useSettingStore, p as onMounted, c as computed, o as openBlock, y as createBlock, z as withCtx, k as createVNode, dy as SearchBox, j as unref, bn as script, m as createBaseVNode, f as createElementBlock, D as renderList, E as toDisplayString, a8 as createTextVNode, F as Fragment, l as script$1, B as createCommentVNode, a5 as script$3, ay as script$4, br as script$5, dz as _sfc_main$1 } from "./index-Bv0b06LE.js";
import { g as script$2, h as script$6 } from "./index-CgMyWf7n.js";
import "./index-Dzu9WL4p.js";
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/ExtensionPanel-Ba57xrmg.js
const _hoisted_1 = { class: "flex justify-end" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "ExtensionPanel",
@ -179,4 +185,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
<<<<<<<< HEAD:comfy/web/assets/ExtensionPanel-BPpLOa_B.js
//# sourceMappingURL=ExtensionPanel-BPpLOa_B.js.map
========
//# sourceMappingURL=ExtensionPanel-Ba57xrmg.js.map
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/ExtensionPanel-Ba57xrmg.js

comfy/web/assets/GraphView-B_UDZi95.js generated vendored Normal file

File diff suppressed because it is too large (4919 lines)

View File

@ -1,6 +1,5 @@
.comfy-menu-hamburger[data-v-7ed57d1a] {
pointer-events: auto;
.comfy-menu-hamburger[data-v-82120b51] {
position: fixed;
z-index: 9999;
display: flex;
@ -41,19 +40,19 @@
z-index: 999;
}
.p-buttongroup-vertical[data-v-cb8f9a1a] {
.p-buttongroup-vertical[data-v-27a9500c] {
display: flex;
flex-direction: column;
border-radius: var(--p-button-border-radius);
overflow: hidden;
border: 1px solid var(--p-panel-border-color);
}
.p-buttongroup-vertical .p-button[data-v-cb8f9a1a] {
.p-buttongroup-vertical .p-button[data-v-27a9500c] {
margin: 0;
border-radius: 0;
}
.node-tooltip[data-v-46859edf] {
.node-tooltip[data-v-f03142eb] {
background: var(--comfy-input-bg);
border-radius: 5px;
box-shadow: 0 0 5px rgba(0, 0, 0, 0.4);
@ -133,13 +132,11 @@
border-right: 4px solid var(--p-button-text-primary-color);
}
.side-tool-bar-container[data-v-33cac83a] {
.side-tool-bar-container[data-v-04875455] {
display: flex;
flex-direction: column;
align-items: center;
pointer-events: auto;
width: var(--sidebar-width);
height: 100%;
@ -150,16 +147,16 @@
--sidebar-width: 4rem;
--sidebar-icon-size: 1.5rem;
}
.side-tool-bar-container.small-sidebar[data-v-33cac83a] {
.side-tool-bar-container.small-sidebar[data-v-04875455] {
--sidebar-width: 2.5rem;
--sidebar-icon-size: 1rem;
}
.side-tool-bar-end[data-v-33cac83a] {
.side-tool-bar-end[data-v-04875455] {
align-self: flex-end;
margin-top: auto;
}
.status-indicator[data-v-8d011a31] {
.status-indicator[data-v-fd6ae3af] {
position: absolute;
font-weight: 700;
font-size: 1.5rem;
@ -221,7 +218,7 @@
border-radius: 0px
}
[data-v-38831d8e] .workflow-tabs {
[data-v-6ab68035] .workflow-tabs {
background-color: var(--comfy-menu-bg);
}
@ -235,31 +232,36 @@
border-bottom-right-radius: 0;
}
.actionbar[data-v-915e5456] {
.actionbar[data-v-ebd56d51] {
pointer-events: all;
position: fixed;
z-index: 1000;
}
.actionbar.is-docked[data-v-915e5456] {
.actionbar.is-docked[data-v-ebd56d51] {
position: static;
border-style: none;
background-color: transparent;
padding: 0px;
}
.actionbar.is-dragging[data-v-915e5456] {
.actionbar.is-dragging[data-v-ebd56d51] {
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
}
[data-v-915e5456] .p-panel-content {
[data-v-ebd56d51] .p-panel-content {
padding: 0.25rem;
}
.is-docked[data-v-915e5456] .p-panel-content {
.is-docked[data-v-ebd56d51] .p-panel-content {
padding: 0px;
}
[data-v-915e5456] .p-panel-header {
[data-v-ebd56d51] .p-panel-header {
display: none;
}
.drag-handle[data-v-ebd56d51] {
height: -moz-max-content;
height: max-content;
width: 0.75rem;
}
.top-menubar[data-v-56df69d2] .p-menubar-item-link svg {
display: none;
@ -275,7 +277,11 @@
border-style: solid;
}
<<<<<<<< HEAD:comfy/web/assets/GraphView-BL5xAPb-.css
.comfyui-menu[data-v-929e7543] {
========
.comfyui-menu[data-v-68d3b5b9] {
>>>>>>>> 96d891cb94d90f220e066cebad349887137f07a6:comfy/web/assets/GraphView-Bo28XDd0.css
width: 100vw;
height: var(--comfy-topbar-height);
background: var(--comfy-menu-bg);
@ -288,6 +294,7 @@
order: 0;
grid-column: 1/-1;
}
.comfyui-menu.dropzone[data-v-68d3b5b9] {
background: var(--p-highlight-background);
}
@ -298,9 +305,96 @@
.comfyui-menu.dropzone-active[data-v-68d3b5b9] {
background: var(--p-highlight-background-focus);
}
[data-v-68d3b5b9] .p-menubar-item-label {
line-height: revert;
}
.comfyui-logo[data-v-68d3b5b9] {
font-size: 1.2em;
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
cursor: default;
}
.comfyui-body[data-v-e89d9273] {
grid-template-columns: auto 1fr auto;
grid-template-rows: auto 1fr auto;
}
/**
+------------------+------------------+------------------+
| |
| .comfyui-body- |
| top |
| (spans all cols) |
| |
+------------------+------------------+------------------+
| | | |
| .comfyui-body- | #graph-canvas | .comfyui-body- |
| left | | right |
| | | |
| | | |
+------------------+------------------+------------------+
| |
| .comfyui-body- |
| bottom |
| (spans all cols) |
| |
+------------------+------------------+------------------+
*/
.comfyui-body-top[data-v-e89d9273] {
order: -5;
/* Span across all columns */
grid-column: 1/-1;
/* Position at the first row */
grid-row: 1;
/* Top menu bar dropdown needs to be above the graph canvas splitter overlay, which is z-index: 999 */
/* Top menu bar z-index needs to be higher than bottom menu bar z-index as by default
pysssss's image feed is located at body-bottom, and it can overlap with the queue button, which
is located in body-top. */
z-index: 1001;
display: flex;
flex-direction: column;
}
.comfyui-body-left[data-v-e89d9273] {
order: -4;
/* Position in the first column */
grid-column: 1;
/* Position below the top element */
grid-row: 2;
z-index: 10;
display: flex;
}
.graph-canvas-container[data-v-e89d9273] {
width: 100%;
height: 100%;
order: -3;
grid-column: 2;
grid-row: 2;
position: relative;
overflow: hidden;
}
.comfyui-body-right[data-v-e89d9273] {
order: -2;
z-index: 10;
grid-column: 3;
grid-row: 2;
}
.comfyui-body-bottom[data-v-e89d9273] {
order: 4;
/* Span across all columns */
grid-column: 1/-1;
grid-row: 3;
/* Bottom menu bar dropdown needs to be above the graph canvas splitter overlay, which is z-index: 999 */
z-index: 1000;
display: flex;
flex-direction: column;
}

comfy/web/assets/InstallView-DW9xwU_F.js

@ -1,12 +1,13 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, U as ref, bm as useModel, o as openBlock, f as createElementBlock, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, bn as script, bh as script$1, ar as withModifiers, z as withCtx, ab as script$2, K as useI18n, c as computed, ai as normalizeClass, B as createCommentVNode, a4 as script$3, a7 as createTextVNode, b5 as electronAPI, _ as _export_sfc, p as onMounted, r as resolveDirective, bg as script$4, i as withDirectives, bo as script$5, bp as script$6, l as script$7, y as createBlock, bj as script$8, bq as MigrationItems, w as watchEffect, F as Fragment, D as renderList, br as script$9, be as useRouter, ag as toRaw } from "./index-BsGgXmrT.js";
import { s as script$a, a as script$b, b as script$c, c as script$d, d as script$e } from "./index-DC_-jkme.js";
import { _ as _sfc_main$5 } from "./BaseViewTemplate-DDUNNAbV.js";
const _hoisted_1$4 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$4 = { class: "flex flex-col gap-4" };
const _hoisted_3$4 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$4 = { class: "text-neutral-400 my-0" };
import { d as defineComponent, T as ref, bq as useModel, o as openBlock, f as createElementBlock, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, br as script, bl as script$1, as as withModifiers, z as withCtx, ac as script$2, I as useI18n, c as computed, aj as normalizeClass, B as createCommentVNode, a5 as script$3, a8 as createTextVNode, b9 as electronAPI, _ as _export_sfc, p as onMounted, r as resolveDirective, bk as script$4, i as withDirectives, bs as script$5, bt as script$6, l as script$7, y as createBlock, bn as script$8, bu as MigrationItems, w as watchEffect, F as Fragment, D as renderList, bv as script$9, bw as mergeModels, bx as ValidationState, X as normalizeI18nKey, N as watch, by as checkMirrorReachable, bz as _sfc_main$7, bA as isInChina, bB as mergeValidationStates, bg as t, b3 as script$a, bC as CUDA_TORCH_URL, bD as NIGHTLY_CPU_TORCH_URL, bi as useRouter, ah as toRaw } from "./index-Bv0b06LE.js";
import { s as script$b, a as script$c, b as script$d, c as script$e, d as script$f } from "./index-SeIZOWJp.js";
import { P as PYTHON_MIRROR, a as PYPI_MIRROR } from "./uvMirrors-B-HKMf6X.js";
import { _ as _sfc_main$8 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1$5 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$5 = { class: "flex flex-col gap-4" };
const _hoisted_3$5 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$5 = { class: "text-neutral-400 my-0" };
const _hoisted_5$3 = { class: "flex flex-col bg-neutral-800 p-4 rounded-lg" };
const _hoisted_6$3 = { class: "flex items-center gap-4" };
const _hoisted_7$3 = { class: "flex-1" };
@ -27,7 +28,7 @@ const _hoisted_20 = {
target: "_blank",
class: "text-blue-400 hover:text-blue-300 underline"
};
const _sfc_main$4 = /* @__PURE__ */ defineComponent({
const _sfc_main$6 = /* @__PURE__ */ defineComponent({
__name: "DesktopSettingsConfiguration",
props: {
"autoUpdate": { type: Boolean, ...{ required: true } },
@ -44,10 +45,10 @@ const _sfc_main$4 = /* @__PURE__ */ defineComponent({
showDialog.value = true;
}, "showMetricsInfo");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$4, [
createBaseVNode("div", _hoisted_2$4, [
createBaseVNode("h2", _hoisted_3$4, toDisplayString(_ctx.$t("install.desktopAppSettings")), 1),
createBaseVNode("p", _hoisted_4$4, toDisplayString(_ctx.$t("install.desktopAppSettingsDescription")), 1)
return openBlock(), createElementBlock("div", _hoisted_1$5, [
createBaseVNode("div", _hoisted_2$5, [
createBaseVNode("h2", _hoisted_3$5, toDisplayString(_ctx.$t("install.desktopAppSettings")), 1),
createBaseVNode("p", _hoisted_4$5, toDisplayString(_ctx.$t("install.desktopAppSettingsDescription")), 1)
]),
createBaseVNode("div", _hoisted_5$3, [
createBaseVNode("div", _hoisted_6$3, [
@ -122,10 +123,10 @@ const _sfc_main$4 = /* @__PURE__ */ defineComponent({
const _imports_0 = "" + new URL("images/nvidia-logo.svg", import.meta.url).href;
const _imports_1 = "" + new URL("images/apple-mps-logo.png", import.meta.url).href;
const _imports_2 = "" + new URL("images/manual-configuration.svg", import.meta.url).href;
const _hoisted_1$3 = { class: "flex flex-col gap-6 w-[600px] h-[30rem] select-none" };
const _hoisted_2$3 = { class: "grow flex flex-col gap-4 text-neutral-300" };
const _hoisted_3$3 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$3 = { class: "m-1 text-neutral-400" };
const _hoisted_1$4 = { class: "flex flex-col gap-6 w-[600px] h-[30rem] select-none" };
const _hoisted_2$4 = { class: "grow flex flex-col gap-4 text-neutral-300" };
const _hoisted_3$4 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$4 = { class: "m-1 text-neutral-400" };
const _hoisted_5$2 = {
key: 0,
class: "m-1"
@ -146,7 +147,7 @@ const _hoisted_12$2 = {
for: "cpu-mode",
class: "select-none"
};
const _sfc_main$3 = /* @__PURE__ */ defineComponent({
const _sfc_main$5 = /* @__PURE__ */ defineComponent({
__name: "GpuPicker",
props: {
"device": {
@ -156,7 +157,7 @@ const _sfc_main$3 = /* @__PURE__ */ defineComponent({
},
emits: ["update:device"],
setup(__props) {
const { t } = useI18n();
const { t: t2 } = useI18n();
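// Local i18n translators throughout this chunk are now bound as t2 because the
// shared index bundle's module-level t (imported above as "bg as t") would
// otherwise be shadowed; the same rename appears in InstallLocationPicker and
// MigrationPicker below.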
const cpuMode = computed({
get: /* @__PURE__ */ __name(() => selected.value === "cpu", "get"),
set: /* @__PURE__ */ __name((value) => {
@ -171,10 +172,10 @@ const _sfc_main$3 = /* @__PURE__ */ defineComponent({
selected.value = newValue;
}, "pickGpu");
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$3, [
createBaseVNode("div", _hoisted_2$3, [
createBaseVNode("h2", _hoisted_3$3, toDisplayString(_ctx.$t("install.gpuSelection.selectGpu")), 1),
createBaseVNode("p", _hoisted_4$3, toDisplayString(_ctx.$t("install.gpuSelection.selectGpuDescription")) + ": ", 1),
return openBlock(), createElementBlock("div", _hoisted_1$4, [
createBaseVNode("div", _hoisted_2$4, [
createBaseVNode("h2", _hoisted_3$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpu")), 1),
createBaseVNode("p", _hoisted_4$4, toDisplayString(_ctx.$t("install.gpuSelection.selectGpuDescription")) + ": ", 1),
createBaseVNode("div", {
class: normalizeClass(["flex gap-2 text-center transition-opacity", { selected: selected.value }])
}, [
@ -240,7 +241,7 @@ const _sfc_main$3 = /* @__PURE__ */ defineComponent({
createVNode(unref(script$3), {
icon: "pi pi-exclamation-triangle",
severity: "warn",
value: unref(t)("icon.exclamation-triangle")
value: unref(t2)("icon.exclamation-triangle")
}, null, 8, ["value"]),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.customSkipsPython")), 1)
]),
@ -258,7 +259,7 @@ const _sfc_main$3 = /* @__PURE__ */ defineComponent({
createVNode(unref(script$3), {
icon: "pi pi-exclamation-triangle",
severity: "warn",
value: unref(t)("icon.exclamation-triangle")
value: unref(t2)("icon.exclamation-triangle")
}, null, 8, ["value"]),
createTextVNode(" " + toDisplayString(_ctx.$t("install.gpuSelection.cpuModeDescription")), 1)
]),
@ -282,11 +283,11 @@ const _sfc_main$3 = /* @__PURE__ */ defineComponent({
};
}
});
const GpuPicker = /* @__PURE__ */ _export_sfc(_sfc_main$3, [["__scopeId", "data-v-79125ff6"]]);
const _hoisted_1$2 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$2 = { class: "flex flex-col gap-4" };
const _hoisted_3$2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$2 = { class: "text-neutral-400 my-0" };
const GpuPicker = /* @__PURE__ */ _export_sfc(_sfc_main$5, [["__scopeId", "data-v-79125ff6"]]);
const _hoisted_1$3 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$3 = { class: "flex flex-col gap-4" };
const _hoisted_3$3 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$3 = { class: "text-neutral-400 my-0" };
const _hoisted_5$1 = { class: "flex gap-2" };
const _hoisted_6$1 = { class: "bg-neutral-800 p-4 rounded-lg" };
const _hoisted_7$1 = { class: "text-lg font-medium mt-0 mb-3 text-neutral-100" };
@ -297,7 +298,7 @@ const _hoisted_11$1 = { class: "pi pi-info-circle" };
const _hoisted_12$1 = { class: "flex items-center gap-2" };
const _hoisted_13 = { class: "text-neutral-200" };
const _hoisted_14 = { class: "pi pi-info-circle" };
const _sfc_main$2 = /* @__PURE__ */ defineComponent({
const _sfc_main$4 = /* @__PURE__ */ defineComponent({
__name: "InstallLocationPicker",
props: {
"installPath": { required: true },
@ -307,12 +308,13 @@ const _sfc_main$2 = /* @__PURE__ */ defineComponent({
},
emits: ["update:installPath", "update:pathError"],
setup(__props) {
const { t } = useI18n();
const { t: t2 } = useI18n();
const installPath = useModel(__props, "installPath");
const pathError = useModel(__props, "pathError");
const pathExists = ref(false);
const appData = ref("");
const appPath = ref("");
const inputTouched = ref(false);
const electron = electronAPI();
onMounted(async () => {
const paths = await electron.getSystemPaths();
@ -328,19 +330,19 @@ const _sfc_main$2 = /* @__PURE__ */ defineComponent({
const validation = await electron.validateInstallPath(path);
if (!validation.isValid) {
const errors = [];
if (validation.cannotWrite) errors.push(t("install.cannotWrite"));
if (validation.cannotWrite) errors.push(t2("install.cannotWrite"));
if (validation.freeSpace < validation.requiredSpace) {
const requiredGB = validation.requiredSpace / 1024 / 1024 / 1024;
errors.push(`${t("install.insufficientFreeSpace")}: ${requiredGB} GB`);
errors.push(`${t2("install.insufficientFreeSpace")}: ${requiredGB} GB`);
}
if (validation.parentMissing) errors.push(t("install.parentMissing"));
if (validation.parentMissing) errors.push(t2("install.parentMissing"));
if (validation.error)
errors.push(`${t("install.unhandledError")}: ${validation.error}`);
errors.push(`${t2("install.unhandledError")}: ${validation.error}`);
pathError.value = errors.join("\n");
}
if (validation.exists) pathExists.value = true;
} catch (error) {
pathError.value = t("install.pathValidationFailed");
pathError.value = t2("install.pathValidationFailed");
}
}, "validatePath");
const browsePath = /* @__PURE__ */ __name(async () => {
@ -351,15 +353,22 @@ const _sfc_main$2 = /* @__PURE__ */ defineComponent({
await validatePath(result);
}
} catch (error) {
pathError.value = t("install.failedToSelectDirectory");
pathError.value = t2("install.failedToSelectDirectory");
}
}, "browsePath");
const onFocus = /* @__PURE__ */ __name(() => {
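// The first focus only marks the field as touched and returns, so the
// prefilled default install path is not flagged as invalid before the user
// has interacted with it; every later focus re-runs validatePath.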
if (!inputTouched.value) {
inputTouched.value = true;
return;
}
validatePath(installPath.value);
}, "onFocus");
return (_ctx, _cache) => {
const _directive_tooltip = resolveDirective("tooltip");
return openBlock(), createElementBlock("div", _hoisted_1$2, [
createBaseVNode("div", _hoisted_2$2, [
createBaseVNode("h2", _hoisted_3$2, toDisplayString(_ctx.$t("install.chooseInstallationLocation")), 1),
createBaseVNode("p", _hoisted_4$2, toDisplayString(_ctx.$t("install.installLocationDescription")), 1),
return openBlock(), createElementBlock("div", _hoisted_1$3, [
createBaseVNode("div", _hoisted_2$3, [
createBaseVNode("h2", _hoisted_3$3, toDisplayString(_ctx.$t("install.chooseInstallationLocation")), 1),
createBaseVNode("p", _hoisted_4$3, toDisplayString(_ctx.$t("install.installLocationDescription")), 1),
createBaseVNode("div", _hoisted_5$1, [
createVNode(unref(script$6), { class: "flex-1" }, {
default: withCtx(() => [
@ -369,10 +378,16 @@ const _sfc_main$2 = /* @__PURE__ */ defineComponent({
_cache[0] || (_cache[0] = ($event) => installPath.value = $event),
validatePath
],
class: normalizeClass(["w-full", { "p-invalid": pathError.value }])
class: normalizeClass(["w-full", { "p-invalid": pathError.value }]),
onFocus
}, null, 8, ["modelValue", "class"]),
withDirectives(createVNode(unref(script$5), { class: "pi pi-info-circle" }, null, 512), [
[_directive_tooltip, _ctx.$t("install.installLocationTooltip")]
[
_directive_tooltip,
_ctx.$t("install.installLocationTooltip"),
void 0,
{ top: true }
]
])
]),
_: 1
@ -428,10 +443,10 @@ const _sfc_main$2 = /* @__PURE__ */ defineComponent({
};
}
});
const _hoisted_1$1 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$1 = { class: "flex flex-col gap-4" };
const _hoisted_3$1 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$1 = { class: "text-neutral-400 my-0" };
const _hoisted_1$2 = { class: "flex flex-col gap-6 w-[600px]" };
const _hoisted_2$2 = { class: "flex flex-col gap-4" };
const _hoisted_3$2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_4$2 = { class: "text-neutral-400 my-0" };
const _hoisted_5 = { class: "flex gap-2" };
const _hoisted_6 = {
key: 0,
@ -446,7 +461,7 @@ const _hoisted_12 = {
key: 1,
class: "text-neutral-400 italic"
};
const _sfc_main$1 = /* @__PURE__ */ defineComponent({
const _sfc_main$3 = /* @__PURE__ */ defineComponent({
__name: "MigrationPicker",
props: {
"sourcePath": { required: false },
@ -458,7 +473,7 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
},
emits: ["update:sourcePath", "update:migrationItemIds"],
setup(__props) {
const { t } = useI18n();
const { t: t2 } = useI18n();
const electron = electronAPI();
const sourcePath = useModel(__props, "sourcePath");
const migrationItemIds = useModel(__props, "migrationItemIds");
@ -483,7 +498,7 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
if (!validation.isValid) pathError.value = validation.error;
} catch (error) {
console.error(error);
pathError.value = t("install.pathValidationFailed");
pathError.value = t2("install.pathValidationFailed");
}
}, "validateSource");
const browsePath = /* @__PURE__ */ __name(async () => {
@ -495,17 +510,17 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
}
} catch (error) {
console.error(error);
pathError.value = t("install.failedToSelectDirectory");
pathError.value = t2("install.failedToSelectDirectory");
}
}, "browsePath");
watchEffect(() => {
migrationItemIds.value = migrationItems.value.filter((item) => item.selected).map((item) => item.id);
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$1, [
createBaseVNode("div", _hoisted_2$1, [
createBaseVNode("h2", _hoisted_3$1, toDisplayString(_ctx.$t("install.migrateFromExistingInstallation")), 1),
createBaseVNode("p", _hoisted_4$1, toDisplayString(_ctx.$t("install.migrationSourcePathDescription")), 1),
return openBlock(), createElementBlock("div", _hoisted_1$2, [
createBaseVNode("div", _hoisted_2$2, [
createBaseVNode("h2", _hoisted_3$2, toDisplayString(_ctx.$t("install.migrateFromExistingInstallation")), 1),
createBaseVNode("p", _hoisted_4$2, toDisplayString(_ctx.$t("install.migrationSourcePathDescription")), 1),
createBaseVNode("div", _hoisted_5, [
createVNode(unref(script$4), {
modelValue: sourcePath.value,
@ -564,10 +579,170 @@ const _sfc_main$1 = /* @__PURE__ */ defineComponent({
};
}
});
const _hoisted_1$1 = { class: "flex flex-col items-center gap-4" };
const _hoisted_2$1 = { class: "w-full" };
const _hoisted_3$1 = { class: "text-lg font-medium text-neutral-100" };
const _hoisted_4$1 = { class: "text-sm text-neutral-400 mt-1" };
const _sfc_main$2 = /* @__PURE__ */ defineComponent({
__name: "MirrorItem",
props: /* @__PURE__ */ mergeModels({
item: {}
}, {
"modelValue": { required: true },
"modelModifiers": {}
}),
emits: /* @__PURE__ */ mergeModels(["state-change"], ["update:modelValue"]),
setup(__props, { emit: __emit }) {
const emit = __emit;
const modelValue = useModel(__props, "modelValue");
const validationState = ref(ValidationState.IDLE);
const normalizedSettingId = computed(() => {
return normalizeI18nKey(__props.item.settingId);
});
onMounted(() => {
modelValue.value = __props.item.mirror;
});
watch(validationState, (newState) => {
emit("state-change", newState);
if (newState === ValidationState.INVALID && modelValue.value === __props.item.mirror) {
modelValue.value = __props.item.fallbackMirror;
}
});
return (_ctx, _cache) => {
return openBlock(), createElementBlock("div", _hoisted_1$1, [
createBaseVNode("div", _hoisted_2$1, [
createBaseVNode("h3", _hoisted_3$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.name`)), 1),
createBaseVNode("p", _hoisted_4$1, toDisplayString(_ctx.$t(`settings.${normalizedSettingId.value}.tooltip`)), 1)
]),
createVNode(_sfc_main$7, {
modelValue: modelValue.value,
"onUpdate:modelValue": _cache[0] || (_cache[0] = ($event) => modelValue.value = $event),
"validate-url-fn": /* @__PURE__ */ __name((mirror) => unref(checkMirrorReachable)(mirror + (_ctx.item.validationPathSuffix ?? "")), "validate-url-fn"),
onStateChange: _cache[1] || (_cache[1] = ($event) => validationState.value = $event)
}, null, 8, ["modelValue", "validate-url-fn"])
]);
};
}
});
const _sfc_main$1 = /* @__PURE__ */ defineComponent({
__name: "MirrorsConfiguration",
props: /* @__PURE__ */ mergeModels({
device: {}
}, {
"pythonMirror": { required: true },
"pythonMirrorModifiers": {},
"pypiMirror": { required: true },
"pypiMirrorModifiers": {},
"torchMirror": { required: true },
"torchMirrorModifiers": {}
}),
emits: ["update:pythonMirror", "update:pypiMirror", "update:torchMirror"],
setup(__props) {
const showMirrorInputs = ref(false);
const pythonMirror = useModel(__props, "pythonMirror");
const pypiMirror = useModel(__props, "pypiMirror");
const torchMirror = useModel(__props, "torchMirror");
const getTorchMirrorItem = /* @__PURE__ */ __name((device) => {
const settingId = "Comfy-Desktop.UV.TorchInstallMirror";
switch (device) {
case "mps":
return {
settingId,
mirror: NIGHTLY_CPU_TORCH_URL,
fallbackMirror: NIGHTLY_CPU_TORCH_URL
};
case "nvidia":
return {
settingId,
mirror: CUDA_TORCH_URL,
fallbackMirror: CUDA_TORCH_URL
};
case "cpu":
default:
return {
settingId,
mirror: PYPI_MIRROR.mirror,
fallbackMirror: PYPI_MIRROR.fallbackMirror
};
}
}, "getTorchMirrorItem");
const userIsInChina = ref(false);
onMounted(async () => {
userIsInChina.value = await isInChina();
});
const useFallbackMirror = /* @__PURE__ */ __name((mirror) => ({
...mirror,
mirror: mirror.fallbackMirror
}), "useFallbackMirror");
const mirrors = computed(
() => [
[PYTHON_MIRROR, pythonMirror],
[PYPI_MIRROR, pypiMirror],
[getTorchMirrorItem(__props.device), torchMirror]
].map(([item, modelValue]) => [
userIsInChina.value ? useFallbackMirror(item) : item,
modelValue
])
);
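// When isInChina() resolves true, each entry is seeded with its fallback
// mirror up front (presumably the in-region one) instead of waiting for the
// default mirror to fail its reachability check.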
const validationStates = ref(
mirrors.value.map(() => ValidationState.IDLE)
);
const validationState = computed(() => {
return mergeValidationStates(validationStates.value);
});
const validationStateTooltip = computed(() => {
switch (validationState.value) {
case ValidationState.INVALID:
return t("install.settings.mirrorsUnreachable");
case ValidationState.VALID:
return t("install.settings.mirrorsReachable");
default:
return t("install.settings.checkingMirrors");
}
});
return (_ctx, _cache) => {
const _directive_tooltip = resolveDirective("tooltip");
return openBlock(), createBlock(unref(script$a), {
header: _ctx.$t("install.settings.mirrorSettings"),
toggleable: "",
collapsed: !showMirrorInputs.value,
"pt:root": "bg-neutral-800 border-none w-[600px]"
}, {
icons: withCtx(() => [
withDirectives(createBaseVNode("i", {
class: normalizeClass({
"pi pi-spin pi-spinner text-neutral-400": validationState.value === unref(ValidationState).LOADING,
"pi pi-check text-green-500": validationState.value === unref(ValidationState).VALID,
"pi pi-times text-red-500": validationState.value === unref(ValidationState).INVALID
})
}, null, 2), [
[_directive_tooltip, validationStateTooltip.value]
])
]),
default: withCtx(() => [
(openBlock(true), createElementBlock(Fragment, null, renderList(mirrors.value, ([item, modelValue], index) => {
return openBlock(), createElementBlock(Fragment, {
key: item.settingId + item.mirror
}, [
index > 0 ? (openBlock(), createBlock(unref(script$1), { key: 0 })) : createCommentVNode("", true),
createVNode(_sfc_main$2, {
item,
modelValue: modelValue.value,
"onUpdate:modelValue": /* @__PURE__ */ __name(($event) => modelValue.value = $event, "onUpdate:modelValue"),
onStateChange: /* @__PURE__ */ __name(($event) => validationStates.value[index] = $event, "onStateChange")
}, null, 8, ["item", "modelValue", "onUpdate:modelValue", "onStateChange"])
], 64);
}), 128))
]),
_: 1
}, 8, ["header", "collapsed"]);
};
}
});
const _hoisted_1 = { class: "flex pt-6 justify-end" };
const _hoisted_2 = { class: "flex pt-6 justify-between" };
const _hoisted_3 = { class: "flex pt-6 justify-between" };
const _hoisted_4 = { class: "flex pt-6 justify-between" };
const _hoisted_4 = { class: "flex mt-6 justify-between" };
const _sfc_main = /* @__PURE__ */ defineComponent({
__name: "InstallView",
setup(__props) {
@ -578,6 +753,9 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
const migrationItemIds = ref([]);
const autoUpdate = ref(true);
const allowMetrics = ref(true);
const pythonMirror = ref("");
const pypiMirror = ref("");
const torchMirror = ref("");
const highestStep = ref(0);
const handleStepChange = /* @__PURE__ */ __name((value) => {
setHighestStep(value);
@ -600,6 +778,9 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
allowMetrics: allowMetrics.value,
migrationSourcePath: migrationSourcePath.value,
migrationItemIds: toRaw(migrationItemIds.value),
pythonMirror: pythonMirror.value,
pypiMirror: pypiMirror.value,
torchMirror: torchMirror.value,
device: device.value
};
electron.installComfyUI(options);
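// The three mirror overrides travel in the same payload as the existing
// install options; they keep their initial empty-string values if the user
// never opens the mirror settings panel.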
@ -618,23 +799,23 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
});
});
return (_ctx, _cache) => {
return openBlock(), createBlock(_sfc_main$5, { dark: "" }, {
return openBlock(), createBlock(_sfc_main$8, { dark: "" }, {
default: withCtx(() => [
createVNode(unref(script$e), {
createVNode(unref(script$f), {
class: "h-full p-8 2xl:p-16",
value: "0",
"onUpdate:value": handleStepChange
}, {
default: withCtx(() => [
createVNode(unref(script$a), { class: "select-none" }, {
createVNode(unref(script$b), { class: "select-none" }, {
default: withCtx(() => [
createVNode(unref(script$b), { value: "0" }, {
createVNode(unref(script$c), { value: "0" }, {
default: withCtx(() => [
createTextVNode(toDisplayString(_ctx.$t("install.gpu")), 1)
]),
_: 1
}),
createVNode(unref(script$b), {
createVNode(unref(script$c), {
value: "1",
disabled: noGpu.value
}, {
@ -643,7 +824,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}, 8, ["disabled"]),
createVNode(unref(script$b), {
createVNode(unref(script$c), {
value: "2",
disabled: noGpu.value || hasError.value || highestStep.value < 1
}, {
@ -652,7 +833,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}, 8, ["disabled"]),
createVNode(unref(script$b), {
createVNode(unref(script$c), {
value: "3",
disabled: noGpu.value || hasError.value || highestStep.value < 2
}, {
@ -664,9 +845,9 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}),
createVNode(unref(script$c), null, {
createVNode(unref(script$d), null, {
default: withCtx(() => [
createVNode(unref(script$d), { value: "0" }, {
createVNode(unref(script$e), { value: "0" }, {
default: withCtx(({ activateCallback }) => [
createVNode(GpuPicker, {
device: device.value,
@ -684,9 +865,9 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}),
createVNode(unref(script$d), { value: "1" }, {
createVNode(unref(script$e), { value: "1" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$2, {
createVNode(_sfc_main$4, {
installPath: installPath.value,
"onUpdate:installPath": _cache[1] || (_cache[1] = ($event) => installPath.value = $event),
pathError: pathError.value,
@ -710,9 +891,9 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}),
createVNode(unref(script$d), { value: "2" }, {
createVNode(unref(script$e), { value: "2" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$1, {
createVNode(_sfc_main$3, {
sourcePath: migrationSourcePath.value,
"onUpdate:sourcePath": _cache[3] || (_cache[3] = ($event) => migrationSourcePath.value = $event),
migrationItemIds: migrationItemIds.value,
@ -735,14 +916,24 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
]),
_: 1
}),
createVNode(unref(script$d), { value: "3" }, {
createVNode(unref(script$e), { value: "3" }, {
default: withCtx(({ activateCallback }) => [
createVNode(_sfc_main$4, {
createVNode(_sfc_main$6, {
autoUpdate: autoUpdate.value,
"onUpdate:autoUpdate": _cache[5] || (_cache[5] = ($event) => autoUpdate.value = $event),
allowMetrics: allowMetrics.value,
"onUpdate:allowMetrics": _cache[6] || (_cache[6] = ($event) => allowMetrics.value = $event)
}, null, 8, ["autoUpdate", "allowMetrics"]),
createVNode(_sfc_main$1, {
device: device.value,
pythonMirror: pythonMirror.value,
"onUpdate:pythonMirror": _cache[7] || (_cache[7] = ($event) => pythonMirror.value = $event),
pypiMirror: pypiMirror.value,
"onUpdate:pypiMirror": _cache[8] || (_cache[8] = ($event) => pypiMirror.value = $event),
torchMirror: torchMirror.value,
"onUpdate:torchMirror": _cache[9] || (_cache[9] = ($event) => torchMirror.value = $event),
class: "mt-6"
}, null, 8, ["device", "pythonMirror", "pypiMirror", "torchMirror"]),
createBaseVNode("div", _hoisted_4, [
createVNode(unref(script$7), {
label: _ctx.$t("g.back"),
@ -755,7 +946,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
icon: "pi pi-check",
iconPos: "right",
disabled: hasError.value,
onClick: _cache[7] || (_cache[7] = ($event) => install())
onClick: _cache[10] || (_cache[10] = ($event) => install())
}, null, 8, ["label", "disabled"])
])
]),
@ -773,8 +964,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const InstallView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-0a97b0ae"]]);
const InstallView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-cd6731d2"]]);
export {
InstallView as default
};
//# sourceMappingURL=InstallView-C1fnMZKt.js.map
//# sourceMappingURL=InstallView-DW9xwU_F.js.map


@ -76,6 +76,6 @@ div.selected {
text-align: center;
}
[data-v-0a97b0ae] .p-steppanel {
[data-v-cd6731d2] .p-steppanel {
background-color: transparent
}

comfy/web/assets/KeybindingPanel-CDYVPYDp.css (generated, vendored, new file, 8 lines)

@ -0,0 +1,8 @@
[data-v-8454e24f] .p-datatable-tbody > tr > td {
padding: 0.25rem;
min-height: 2rem
}
[data-v-8454e24f] .p-datatable-row-selected .actions,[data-v-8454e24f] .p-datatable-selectable-row:hover .actions {
visibility: visible
}


@ -1,8 +0,0 @@
[data-v-2554ab36] .p-datatable-tbody > tr > td {
padding: 0.25rem;
min-height: 2rem
}
[data-v-2554ab36] .p-datatable-row-selected .actions,[data-v-2554ab36] .p-datatable-selectable-row:hover .actions {
visibility: visible
}

comfy/web/assets/KeybindingPanel-oavhFdkz.js

@ -1,9 +1,16 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, c as computed, o as openBlock, f as createElementBlock, F as Fragment, D as renderList, k as createVNode, z as withCtx, a8 as createTextVNode, E as toDisplayString, j as unref, a5 as script, B as createCommentVNode, T as ref, dx as FilterMatchMode, ao as useKeybindingStore, J as useCommandStore, I as useI18n, X as normalizeI18nKey, w as watchEffect, aV as useToast, r as resolveDirective, y as createBlock, dy as SearchBox, m as createBaseVNode, l as script$2, bk as script$4, as as withModifiers, bn as script$5, ac as script$6, i as withDirectives, dz as _sfc_main$2, dA as KeyComboImpl, dB as KeybindingImpl, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { g as script$1, h as script$3 } from "./index-CgMyWf7n.js";
import { u as useKeybindingService } from "./keybindingService-DyjX-nxF.js";
import "./index-Dzu9WL4p.js";
const _hoisted_1$1 = {
key: 0,
class: "px-2"
@ -96,6 +103,16 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
}
__name(removeKeybinding, "removeKeybinding");
function captureKeybinding(event) {
if (!event.shiftKey && !event.altKey && !event.ctrlKey && !event.metaKey) {
switch (event.key) {
case "Escape":
cancelEdit();
return;
case "Enter":
saveKeybinding();
return;
}
}
const keyCombo = KeyComboImpl.fromEvent(event);
newBindingKeyCombo.value = keyCombo;
}
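// Plain Escape cancels the edit and plain Enter saves it; with any modifier
// held (Shift/Alt/Ctrl/Meta) both keys fall through and are captured as part
// of the new key combo instead.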
@ -151,7 +168,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
value: commandsData.value,
selection: selectedCommandData.value,
"onUpdate:selection": _cache[1] || (_cache[1] = ($event) => selectedCommandData.value = $event),
"global-filter-fields": ["id"],
"global-filter-fields": ["id", "label"],
filters: filters.value,
selectionMode: "single",
stripedRows: "",
@ -216,7 +233,7 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
visible: editDialogVisible.value,
"onUpdate:visible": _cache[2] || (_cache[2] = ($event) => editDialogVisible.value = $event),
modal: "",
header: currentEditingCommand.value?.id,
header: currentEditingCommand.value?.label,
onHide: cancelEdit
}, {
footer: withCtx(() => [
@ -275,8 +292,12 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-2554ab36"]]);
const KeybindingPanel = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-8454e24f"]]);
export {
KeybindingPanel as default
};
//# sourceMappingURL=KeybindingPanel-oavhFdkz.js.map

comfy/web/assets/MaintenanceView-Bh8OZpgl.js (generated, vendored, new file, 25635 lines)

File diff suppressed because one or more lines are too long

comfy/web/assets/MaintenanceView-DEJCj8SR.css (generated, vendored, new file, 87 lines)

@ -0,0 +1,87 @@
.task-card-ok[data-v-c3bd7658] {
position: absolute;
right: -1rem;
bottom: -1rem;
grid-column: 1 / -1;
grid-row: 1 / -1;
--tw-text-opacity: 1;
color: rgb(150 206 76 / var(--tw-text-opacity));
opacity: 1;
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
font-size: 4rem;
text-shadow: 0.25rem 0 0.5rem black;
z-index: 10;
}
.p-card {
&[data-v-c3bd7658] {
transition-property: opacity;
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
transition-duration: 150ms;
--p-card-background: var(--p-button-secondary-background);
opacity: 0.9;
}
&.opacity-65[data-v-c3bd7658] {
opacity: 0.4;
}
&[data-v-c3bd7658]:hover {
opacity: 1;
}
}
[data-v-c3bd7658] .p-card-header {
z-index: 0;
}
[data-v-c3bd7658] .p-card-body {
z-index: 1;
flex-grow: 1;
justify-content: space-between;
}
.task-div {
> i[data-v-c3bd7658] {
pointer-events: none;
}
&:hover > i[data-v-c3bd7658] {
opacity: 0.2;
}
}
[data-v-dd50a7dd] .p-tag {
--p-tag-gap: 0.375rem;
}
.backspan[data-v-dd50a7dd]::before {
position: absolute;
margin: 0px;
color: var(--p-text-muted-color);
font-family: 'primeicons';
top: -2rem;
right: -2rem;
speak: none;
font-style: normal;
font-weight: normal;
font-variant: normal;
text-transform: none;
line-height: 1;
display: inline-block;
-webkit-font-smoothing: antialiased;
opacity: 0.02;
font-size: min(14rem, 90vw);
z-index: 0;
}
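/* The ::before pseudo-element above appears to paint a huge, nearly
   transparent (opacity 0.02) primeicons glyph behind the panel content as a
   watermark; z-index: 0 keeps it underneath the interactive content. */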

comfy/web/assets/ManualConfigurationView-DTLyJ3VG.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, I as useI18n, T as ref, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, a5 as script, b3 as script$1, l as script$2, b9 as electronAPI, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1 = { class: "comfy-installer grow flex flex-col gap-4 text-neutral-300 max-w-110" };
const _hoisted_2 = { class: "text-2xl font-semibold text-neutral-100" };
const _hoisted_3 = { class: "m-1 text-neutral-300" };
@ -71,4 +76,8 @@ const ManualConfigurationView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scop
export {
ManualConfigurationView as default
};
//# sourceMappingURL=ManualConfigurationView-DTLyJ3VG.js.map

comfy/web/assets/MetricsConsentView-C80fk2cl.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
import { d as defineComponent, aV as useToast, I as useI18n, T as ref, bi as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, a8 as createTextVNode, k as createVNode, j as unref, br as script, l as script$1, b9 as electronAPI } from "./index-Bv0b06LE.js";
const _hoisted_1 = { class: "h-full p-8 2xl:p-16 flex flex-col items-center justify-center" };
const _hoisted_2 = { class: "bg-neutral-800 rounded-lg shadow-lg p-6 w-full max-w-[600px] flex flex-col gap-6" };
const _hoisted_3 = { class: "text-3xl font-semibold text-neutral-100" };
@ -83,4 +88,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=MetricsConsentView-C80fk2cl.js.map

comfy/web/assets/NotSupportedView-B78ZVR9Z.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bi as useRouter, r as resolveDirective, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, i as withDirectives, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _imports_0 = "" + new URL("images/sad_girl.png", import.meta.url).href;
const _hoisted_1 = { class: "sad-container" };
const _hoisted_2 = { class: "no-drag sad-text flex items-center" };
@ -83,4 +88,8 @@ const NotSupportedView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "
export {
NotSupportedView as default
};
//# sourceMappingURL=NotSupportedView-B78ZVR9Z.js.map

comfy/web/assets/ServerConfigPanel-BYrt6wyr.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { o as openBlock, f as createElementBlock, m as createBaseVNode, H as markRaw, d as defineComponent, a as useSettingStore, af as storeToRefs, N as watch, dJ as useCopyToClipboard, I as useI18n, y as createBlock, z as withCtx, j as unref, bn as script, E as toDisplayString, D as renderList, F as Fragment, k as createVNode, l as script$1, B as createCommentVNode, bl as script$2, dK as FormItem, dz as _sfc_main$1, b9 as electronAPI } from "./index-Bv0b06LE.js";
import { u as useServerConfigStore } from "./serverConfigStore-D2Vr0L0h.js";
const _hoisted_1$1 = {
viewBox: "0 0 24 24",
width: "1.2em",
@ -153,4 +158,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=ServerConfigPanel-BYrt6wyr.js.map

comfy/web/assets/ServerStartView-B7TlHxYo.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, I as useI18n, T as ref, bo as ProgressStatus, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, a8 as createTextVNode, E as toDisplayString, j as unref, f as createElementBlock, B as createCommentVNode, k as createVNode, l as script, i as withDirectives, v as vShow, bp as BaseTerminal, b9 as electronAPI, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1 = { class: "flex flex-col w-full h-full items-center" };
const _hoisted_2 = { class: "text-2xl font-bold" };
const _hoisted_3 = { key: 0 };
@ -93,8 +98,12 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
};
}
});
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-4140d62b"]]);
const ServerStartView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-e6ba9633"]]);
export {
ServerStartView as default
};
//# sourceMappingURL=ServerStartView-B7TlHxYo.js.map


@ -1,5 +1,5 @@
[data-v-4140d62b] .xterm-helper-textarea {
[data-v-e6ba9633] .xterm-helper-textarea {
/* Hide this as it moves all over when uv is running */
display: none;
}

comfy/web/assets/TerminalOutputDrawer-CKr7Br7O.js (generated, vendored, new file, 1061 lines)

File diff suppressed because it is too large

comfy/web/assets/UserSelectView-C703HOyO.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, ak as useUserStore, bi as useRouter, T as ref, c as computed, p as onMounted, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, bj as withKeys, j as unref, bk as script, bl as script$1, bm as script$2, bn as script$3, a8 as createTextVNode, B as createCommentVNode, l as script$4 } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1 = {
id: "comfy-user-selection",
class: "min-w-84 relative rounded-lg bg-[var(--comfy-menu-bg)] p-5 px-10 shadow-lg"
@ -98,4 +103,8 @@ const _sfc_main = /* @__PURE__ */ defineComponent({
export {
_sfc_main as default
};
//# sourceMappingURL=UserSelectView-C703HOyO.js.map

comfy/web/assets/WelcomeView-DIFvbWc2.js

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { d as defineComponent, bi as useRouter, o as openBlock, y as createBlock, z as withCtx, m as createBaseVNode, E as toDisplayString, k as createVNode, j as unref, l as script, _ as _export_sfc } from "./index-Bv0b06LE.js";
import { _ as _sfc_main$1 } from "./BaseViewTemplate-BTbuZf5t.js";
const _hoisted_1 = { class: "flex flex-col items-center justify-center gap-8 p-8" };
const _hoisted_2 = { class: "animated-gradient-text text-glow select-none" };
const _sfc_main = /* @__PURE__ */ defineComponent({
@ -36,4 +41,8 @@ const WelcomeView = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-
export {
WelcomeView as default
};
//# sourceMappingURL=WelcomeView-DIFvbWc2.js.map

comfy/web/assets/index-A_bXPJCN.js (generated, vendored, new file, 618 lines)

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

comfy/web/assets/index-CBxvvAzM.css

@ -306,6 +306,7 @@
.litegraph .dialog .dialog-footer {
height: 50px;
padding: 10px;
margin: 0;
border-top: 1px solid #1a1a1a;
}
@ -442,63 +443,6 @@
color: black;
}
.litegraph .subgraph_property {
padding: 4px;
}
.litegraph .subgraph_property:hover {
background-color: #333;
}
.litegraph .subgraph_property.extra {
margin-top: 8px;
}
.litegraph .subgraph_property span.name {
font-size: 1.3em;
padding-left: 4px;
}
.litegraph .subgraph_property span.type {
opacity: 0.5;
margin-right: 20px;
padding-left: 4px;
}
.litegraph .subgraph_property span.label {
display: inline-block;
width: 60px;
padding: 0px 10px;
}
.litegraph .subgraph_property input {
width: 140px;
color: #999;
background-color: #1a1a1a;
border-radius: 4px;
border: 0;
margin-right: 10px;
padding: 4px;
padding-left: 10px;
}
.litegraph .subgraph_property button {
background-color: #1c1c1c;
color: #aaa;
border: 0;
border-radius: 2px;
padding: 4px 10px;
cursor: pointer;
}
.litegraph .subgraph_property.extra {
color: #ccc;
}
.litegraph .subgraph_property.extra input {
background-color: #111;
}
.litegraph .bullet_icon {
margin-left: 10px;
border-radius: 10px;
@ -661,21 +605,6 @@
.litegraph .dialog .dialog-content {
display: block;
}
.litegraph .dialog .dialog-content .subgraph_property {
padding: 5px;
}
.litegraph .dialog .dialog-footer {
margin: 0;
}
.litegraph .dialog .dialog-footer .subgraph_property {
margin-top: 0;
display: flex;
align-items: center;
padding: 5px;
}
.litegraph .dialog .dialog-footer .subgraph_property .name {
flex: 1;
}
.litegraph .graphdialog {
display: flex;
align-items: center;
@ -2110,6 +2039,12 @@
.-right-4{
right: -1rem;
}
.bottom-0{
bottom: 0px;
}
.bottom-\[10px\]{
bottom: 10px;
}
@ -2119,6 +2054,15 @@
.left-0{
left: 0px;
}
.left-1\/2{
left: 50%;
}
.left-12{
left: 3rem;
}
.left-2{
left: 0.5rem;
}
.left-\[-350px\]{
left: -350px;
}
@ -2128,6 +2072,9 @@
.top-0{
top: 0px;
}
.top-2{
top: 0.5rem;
}
.top-\[50px\]{
top: 50px;
}
@ -2137,6 +2084,9 @@
.z-10{
z-index: 10;
}
.z-20{
z-index: 20;
}
.z-\[1000\]{
z-index: 1000;
}
@ -2192,6 +2142,10 @@
margin-top: 1rem;
margin-bottom: 1rem;
}
.my-8{
margin-top: 2rem;
margin-bottom: 2rem;
}
.mb-2{
margin-bottom: 0.5rem;
}
@ -2240,6 +2194,9 @@
.mt-5{
margin-top: 1.25rem;
}
.mt-6{
margin-top: 1.5rem;
}
.block{
display: block;
}
@ -2279,6 +2236,9 @@
.h-16{
height: 4rem;
}
.h-48{
height: 12rem;
}
.h-6{
height: 1.5rem;
}
@ -2324,6 +2284,9 @@
.min-h-screen{
min-height: 100vh;
}
.w-0{
width: 0px;
}
.w-1\/2{
width: 50%;
}
@ -2336,12 +2299,21 @@
.w-16{
width: 4rem;
}
.w-24{
width: 6rem;
}
.w-28{
width: 7rem;
}
.w-3{
width: 0.75rem;
}
.w-3\/12{
width: 25%;
}
.w-32{
width: 8rem;
}
.w-44{
width: 11rem;
}
@ -2451,6 +2423,9 @@
.cursor-pointer{
cursor: pointer;
}
.touch-none{
touch-action: none;
}
.select-none{
-webkit-user-select: none;
-moz-user-select: none;
@ -2633,7 +2608,11 @@
}
.border-neutral-700{
--tw-border-opacity: 1;
border-color: rgb(64 64 64 / var(--tw-border-opacity));
}
.bg-\[var\(--comfy-menu-bg\)\]{
background-color: var(--comfy-menu-bg);
@ -2886,6 +2865,10 @@
--tw-text-opacity: 1;
color: rgb(239 68 68 / var(--tw-text-opacity, 1));
}
.text-white{
--tw-text-opacity: 1;
color: rgb(255 255 255 / var(--tw-text-opacity));
}
.underline{
text-decoration-line: underline;
}
@ -2967,6 +2950,9 @@
.duration-100{
transition-duration: 100ms;
}
.duration-200{
transition-duration: 200ms;
}
.duration-300{
transition-duration: 300ms;
}
@ -3025,8 +3011,6 @@ body {
height: 100vh;
margin: 0;
overflow: hidden;
grid-template-columns: auto 1fr auto;
grid-template-rows: auto 1fr auto;
background: var(--bg-color) var(--bg-img);
color: var(--fg-color);
min-height: -webkit-fill-available;
@ -3036,87 +3020,6 @@ body {
font-family: Arial, sans-serif;
}
/**
+------------------+------------------+------------------+
| |
| .comfyui-body- |
| top |
| (spans all cols) |
| |
+------------------+------------------+------------------+
| | | |
| .comfyui-body- | #graph-canvas | .comfyui-body- |
| left | | right |
| | | |
| | | |
+------------------+------------------+------------------+
| |
| .comfyui-body- |
| bottom |
| (spans all cols) |
| |
+------------------+------------------+------------------+
*/
.comfyui-body-top {
order: -5;
/* Span across all columns */
grid-column: 1/-1;
/* Position at the first row */
grid-row: 1;
/* Top menu bar dropdown needs to be above the graph canvas splitter overlay, which is z-index: 999 */
/* Top menu bar z-index needs to be higher than bottom menu bar z-index as by default
pysssss's image feed is located at body-bottom, and it can overlap with the queue button, which
is located in body-top. */
z-index: 1001;
display: flex;
flex-direction: column;
}
.comfyui-body-left {
order: -4;
/* Position in the first column */
grid-column: 1;
/* Position below the top element */
grid-row: 2;
z-index: 10;
display: flex;
}
.graph-canvas-container {
width: 100%;
height: 100%;
order: -3;
grid-column: 2;
grid-row: 2;
position: relative;
overflow: hidden;
}
#graph-canvas {
width: 100%;
height: 100%;
touch-action: none;
}
.comfyui-body-right {
order: -2;
z-index: 10;
grid-column: 3;
grid-row: 2;
}
.comfyui-body-bottom {
order: 4;
/* Span across all columns */
grid-column: 1/-1;
grid-row: 3;
/* Bottom menu bar dropdown needs to be above the graph canvas splitter overlay, which is z-index: 999 */
z-index: 1000;
display: flex;
flex-direction: column;
}
.comfy-multiline-input {
background-color: var(--comfy-input-bg);
color: var(--input-text);
@ -3531,84 +3434,6 @@ dialog::backdrop {
justify-content: center;
}
#comfy-settings-dialog {
padding: 0;
width: 41rem;
}
#comfy-settings-dialog tr > td:first-child {
text-align: right;
}
#comfy-settings-dialog tbody button,
#comfy-settings-dialog table > button {
background-color: var(--bg-color);
border: 1px var(--border-color) solid;
border-radius: 0;
color: var(--input-text);
font-size: 1rem;
padding: 0.5rem;
}
#comfy-settings-dialog button:hover {
background-color: var(--tr-odd-bg-color);
}
/* General CSS for tables */
.comfy-table {
border-collapse: collapse;
color: var(--input-text);
font-family: Arial, sans-serif;
width: 100%;
}
.comfy-table caption {
position: sticky;
top: 0;
background-color: var(--bg-color);
color: var(--input-text);
font-size: 1rem;
font-weight: bold;
padding: 8px;
text-align: center;
border-bottom: 1px solid var(--border-color);
}
.comfy-table caption .comfy-btn {
position: absolute;
top: -2px;
right: 0;
bottom: 0;
cursor: pointer;
border: none;
height: 100%;
border-radius: 0;
aspect-ratio: 1/1;
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
font-size: 20px;
}
.comfy-table caption .comfy-btn:focus {
outline: none;
}
.comfy-table tr:nth-child(even) {
background-color: var(--tr-even-bg-color);
}
.comfy-table tr:nth-child(odd) {
background-color: var(--tr-odd-bg-color);
}
.comfy-table td,
.comfy-table th {
border: 1px solid var(--border-color);
padding: 8px;
}
/* Context menu */
.litegraph .dialog {
@ -3708,24 +3533,6 @@ dialog::backdrop {
will-change: transform;
}
@media only screen and (max-width: 450px) {
#comfy-settings-dialog .comfy-table tbody {
display: grid;
}
#comfy-settings-dialog .comfy-table tr {
display: grid;
}
#comfy-settings-dialog tr > td:first-child {
text-align: center;
border-bottom: none;
padding-bottom: 0;
}
#comfy-settings-dialog tr > td:not(:first-child) {
text-align: center;
border-top: none;
}
}
audio.comfy-audio.empty-audio-widget {
display: none;
}
@ -3736,7 +3543,6 @@ audio.comfy-audio.empty-audio-widget {
left: 0;
width: 100%;
height: 100%;
pointer-events: none;
}
/* Set auto complete panel's width as it is not accessible within vue-root */
@ -3799,6 +3605,39 @@ audio.comfy-audio.empty-audio-widget {
.hover\:opacity-100:hover{
opacity: 1;
}
@media (prefers-reduced-motion: no-preference){
.motion-safe\:w-0{
width: 0px;
}
.motion-safe\:opacity-0{
opacity: 0;
}
.group\/sidebar-tab:focus-within .motion-safe\:group-focus-within\/sidebar-tab\:w-auto{
width: auto;
}
.group\/sidebar-tab:focus-within .motion-safe\:group-focus-within\/sidebar-tab\:opacity-100{
opacity: 1;
}
.group\/sidebar-tab:hover .motion-safe\:group-hover\/sidebar-tab\:w-auto{
width: auto;
}
.group\/sidebar-tab:hover .motion-safe\:group-hover\/sidebar-tab\:opacity-100{
opacity: 1;
}
.group\/tree-node:hover .motion-safe\:group-hover\/tree-node\:opacity-100{
opacity: 1;
}
}
@media not all and (min-width: 640px){
.max-sm\:hidden{
@ -3886,7 +3725,7 @@ audio.comfy-audio.empty-audio-widget {
padding-top: 0px
}
.prompt-dialog-content[data-v-3df70997] {
.prompt-dialog-content[data-v-4f1e3bbe] {
white-space: pre-wrap;
}
@ -3904,17 +3743,29 @@ audio.comfy-audio.empty-audio-widget {
margin-bottom: 1rem;
}
.comfy-error-report[data-v-e5000be2] {
display: flex;
flex-direction: column;
gap: 1rem;
}
.action-container[data-v-e5000be2] {
display: flex;
gap: 1rem;
justify-content: flex-end;
}
.wrapper-pre[data-v-e5000be2] {
white-space: pre-wrap;
word-wrap: break-word;
}
@ -3983,6 +3834,7 @@ audio.comfy-audio.empty-audio-widget {
padding: 0px;
}
.form-input[data-v-a29c257f] .input-slider .p-inputnumber input,
.form-input[data-v-a29c257f] .input-slider .slider-part {
width: 5rem
}
.form-input[data-v-a29c257f] .p-inputtext,
.form-input[data-v-a29c257f] .p-select {
width: 11rem
}
@ -4279,26 +4140,26 @@ audio.comfy-audio.empty-audio-widget {
position: relative;
}
[data-v-250ab9af] .p-terminal .xterm {
[data-v-873a313f] .p-terminal .xterm {
overflow-x: auto;
}
[data-v-250ab9af] .p-terminal .xterm-screen {
[data-v-873a313f] .p-terminal .xterm-screen {
background-color: black;
overflow-y: hidden;
}
[data-v-90a7f075] .p-terminal .xterm {
[data-v-14fef2e4] .p-terminal .xterm {
overflow-x: auto;
}
[data-v-90a7f075] .p-terminal .xterm-screen {
[data-v-14fef2e4] .p-terminal .xterm-screen {
background-color: black;
overflow-y: hidden;
}
[data-v-03daf1c8] .p-terminal .xterm {
[data-v-cf0c7d52] .p-terminal .xterm {
overflow-x: auto;
}
[data-v-03daf1c8] .p-terminal .xterm-screen {
[data-v-cf0c7d52] .p-terminal .xterm-screen {
background-color: black;
overflow-y: hidden;
}
@ -4610,28 +4471,32 @@ audio.comfy-audio.empty-audio-widget {
box-sizing: border-box;
}
.tree-node[data-v-a6457774] {
.tree-node[data-v-a945b5a8] {
width: 100%;
display: flex;
align-items: center;
justify-content: space-between;
}
.leaf-count-badge[data-v-a6457774] {
.leaf-count-badge[data-v-a945b5a8] {
margin-left: 0.5rem;
}
.node-content[data-v-a6457774] {
.node-content[data-v-a945b5a8] {
display: flex;
align-items: center;
flex-grow: 1;
}
.leaf-label[data-v-a6457774] {
.leaf-label[data-v-a945b5a8] {
margin-left: 0.5rem;
}
[data-v-a6457774] .editable-text span {
[data-v-a945b5a8] .editable-text span {
word-break: break-all;
}
[data-v-e3a237e6] .tree-explorer-node-label {
width: 100%;
display: flex;
align-items: center;
@ -4644,10 +4509,17 @@ audio.comfy-audio.empty-audio-widget {
* By setting the position to relative on the parent and using an absolutely positioned pseudo-element,
* we can create a visual indicator for the drop target without affecting the layout of other elements.
*/
[data-v-e3a237e6] .p-tree-node-content:has(.tree-folder) {
position: relative;
}
[data-v-e3a237e6] .p-tree-node-content:has(.tree-folder.can-drop)::after {
content: '';
position: absolute;
top: 0;
@ -4658,21 +4530,21 @@ audio.comfy-audio.empty-audio-widget {
pointer-events: none;
}
[data-v-5e759e25] .p-toolbar-end .p-button {
[data-v-0061c432] .p-toolbar-end .p-button {
padding-top: 0.25rem;
padding-bottom: 0.25rem
}
@media (min-width: 1536px) {
[data-v-5e759e25] .p-toolbar-end .p-button {
[data-v-0061c432] .p-toolbar-end .p-button {
padding-top: 0.5rem;
padding-bottom: 0.5rem
}
}
[data-v-5e759e25] .p-toolbar-start {
[data-v-0061c432] .p-toolbar-start {
min-width: 0px;
@ -4750,36 +4622,11 @@ audio.comfy-audio.empty-audio-widget {
vertical-align: top;
}
[data-v-0bb2ac55] .pi-fake-spacer {
[data-v-3be51840] .pi-fake-spacer {
height: 1px;
width: 16px;
}
._content[data-v-c4279e6b] {
display: flex;
flex-direction: column
}
._content[data-v-c4279e6b] > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-c4279e6b] {
display: flex;
flex-direction: column;
align-items: flex-end;
padding-top: 1rem
}
.slot_row[data-v-d9792337] {
padding: 2px;
}
@ -4907,6 +4754,31 @@ audio.comfy-audio.empty-audio-widget {
color: var(--error-text);
}
._content[data-v-c4279e6b] {
display: flex;
flex-direction: column
}
._content[data-v-c4279e6b] > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(0.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(0.5rem * var(--tw-space-y-reverse))
}
._footer[data-v-c4279e6b] {
display: flex;
flex-direction: column;
align-items: flex-end;
padding-top: 1rem
}
.node-lib-node-container[data-v-da9a8962] {
height: 100%;
width: 100%

View File

@ -1,7 +1,12 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { bG as BaseStyle, bH as script$s, bX as script$t, o as openBlock, f as createElementBlock, at as mergeProps, m as createBaseVNode, E as toDisplayString, bO as Ripple, r as resolveDirective, i as withDirectives, y as createBlock, C as resolveDynamicComponent, bm as script$u, bR as resolveComponent, aj as normalizeClass, cp as createSlots, z as withCtx, aY as script$v, cf as script$w, F as Fragment, D as renderList, a8 as createTextVNode, c8 as setAttribute, cx as normalizeProps, A as renderSlot, B as createCommentVNode, bY as script$x, ce as equals, cF as script$y, bv as script$z, cJ as getFirstFocusableElement, c7 as OverlayEventBus, cZ as getVNodeProp, cc as resolveFieldData, dD as invokeElementMethod, bK as getAttribute, c_ as getNextElementSibling, c2 as getOuterWidth, c$ as getPreviousElementSibling, l as script$A, bN as script$B, bQ as script$C, cl as script$E, cd as isNotEmpty, as as withModifiers, da as getOuterHeight, bP as UniqueComponentId, d1 as _default, bZ as ZIndex, bL as focus, b_ as addStyle, c3 as absolutePosition, b$ as ConnectedOverlayScrollHandler, c0 as isTouchDevice, dE as FilterOperator, ca as script$F, ct as script$G, cB as FocusTrap, k as createVNode, bI as Transition, bj as withKeys, c5 as getIndex, cv as script$H, d0 as isClickable, d2 as clearSelection, c9 as localeComparator, co as sort, cL as FilterService, dx as FilterMatchMode, bJ as findSingle, cO as findIndexInList, c4 as find, dF as exportCSV, cW as getOffset, d3 as isRTL, dG as getHiddenElementOuterWidth, dH as getHiddenElementOuterHeight, dI as reorderArray, bT as removeClass, bU as addClass, ci as isEmpty, cM as script$I, ck as script$J } from "./index-Bv0b06LE.js";
import { s as script$D } from "./index-Dzu9WL4p.js";
var ColumnStyle = BaseStyle.extend({
name: "column"
});
@ -8787,4 +8792,8 @@ export {
script as h,
script$l as s
};
//# sourceMappingURL=index-CgMyWf7n.js.map

27
comfy/web/assets/index-Dzu9WL4p.js generated vendored Normal file
View File

@ -0,0 +1,27 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { bX as script$1, o as openBlock, f as createElementBlock, at as mergeProps, m as createBaseVNode } from "./index-Bv0b06LE.js";
var script = {
name: "BarsIcon",
"extends": script$1
};
function render(_ctx, _cache, $props, $setup, $data, $options) {
return openBlock(), createElementBlock("svg", mergeProps({
width: "14",
height: "14",
viewBox: "0 0 14 14",
fill: "none",
xmlns: "http://www.w3.org/2000/svg"
}, _ctx.pti()), _cache[0] || (_cache[0] = [createBaseVNode("path", {
"fill-rule": "evenodd",
"clip-rule": "evenodd",
d: "M13.3226 3.6129H0.677419C0.497757 3.6129 0.325452 3.54152 0.198411 3.41448C0.0713707 3.28744 0 3.11514 0 2.93548C0 2.75581 0.0713707 2.58351 0.198411 2.45647C0.325452 2.32943 0.497757 2.25806 0.677419 2.25806H13.3226C13.5022 2.25806 13.6745 2.32943 13.8016 2.45647C13.9286 2.58351 14 2.75581 14 2.93548C14 3.11514 13.9286 3.28744 13.8016 3.41448C13.6745 3.54152 13.5022 3.6129 13.3226 3.6129ZM13.3226 7.67741H0.677419C0.497757 7.67741 0.325452 7.60604 0.198411 7.479C0.0713707 7.35196 0 7.17965 0 6.99999C0 6.82033 0.0713707 6.64802 0.198411 6.52098C0.325452 6.39394 0.497757 6.32257 0.677419 6.32257H13.3226C13.5022 6.32257 13.6745 6.39394 13.8016 6.52098C13.9286 6.64802 14 6.82033 14 6.99999C14 7.17965 13.9286 7.35196 13.8016 7.479C13.6745 7.60604 13.5022 7.67741 13.3226 7.67741ZM0.677419 11.7419H13.3226C13.5022 11.7419 13.6745 11.6706 13.8016 11.5435C13.9286 11.4165 14 11.2442 14 11.0645C14 10.8848 13.9286 10.7125 13.8016 10.5855C13.6745 10.4585 13.5022 10.3871 13.3226 10.3871H0.677419C0.497757 10.3871 0.325452 10.4585 0.198411 10.5855C0.0713707 10.7125 0 10.8848 0 11.0645C0 11.2442 0.0713707 11.4165 0.198411 11.5435C0.325452 11.6706 0.497757 11.7419 0.677419 11.7419Z",
fill: "currentColor"
}, null, -1)]), 16);
}
__name(render, "render");
script.render = render;
export {
script as s
};
//# sourceMappingURL=index-Dzu9WL4p.js.map

539
comfy/web/assets/index-SeIZOWJp.js generated vendored Normal file

File diff suppressed because one or more lines are too long

View File

@ -1,6 +1,10 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { ao as useKeybindingStore, J as useCommandStore, a as useSettingStore, dA as KeyComboImpl, dB as KeybindingImpl } from "./index-Bv0b06LE.js";
const CORE_KEYBINDINGS = [
{
combo: {
@ -186,7 +190,7 @@ const useKeybindingService = /* @__PURE__ */ __name(() => {
return;
}
const target = event.composedPath()[0];
if (!keyCombo.hasModifier && (target.tagName === "TEXTAREA" || target.tagName === "INPUT" || target.tagName === "SPAN" && target.classList.contains("property_value"))) {
if (keyCombo.isReservedByTextInput && (target.tagName === "TEXTAREA" || target.tagName === "INPUT" || target.tagName === "SPAN" && target.classList.contains("property_value"))) {
return;
}
const keybinding = keybindingStore.getKeybinding(keyCombo);
@ -247,4 +251,8 @@ const useKeybindingService = /* @__PURE__ */ __name(() => {
export {
useKeybindingService as u
};
//# sourceMappingURL=keybindingService-DyjX-nxF.js.map

View File

@ -1,6 +1,10 @@
var __defProp = Object.defineProperty;
var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
import { a1 as defineStore, T as ref, c as computed } from "./index-Bv0b06LE.js";
const useServerConfigStore = defineStore("serverConfig", () => {
const serverConfigById = ref({});
const serverConfigs = computed(() => {
@ -87,4 +91,8 @@ const useServerConfigStore = defineStore("serverConfig", () => {
export {
useServerConfigStore as u
};
//# sourceMappingURL=serverConfigStore-D2Vr0L0h.js.map

16
comfy/web/assets/uvMirrors-B-HKMf6X.js generated vendored Normal file
View File

@ -0,0 +1,16 @@
const PYTHON_MIRROR = {
settingId: "Comfy-Desktop.UV.PythonInstallMirror",
mirror: "https://github.com/astral-sh/python-build-standalone/releases/download",
fallbackMirror: "https://bgithub.xyz/astral-sh/python-build-standalone/releases/download",
validationPathSuffix: "/20250115/cpython-3.10.16+20250115-aarch64-apple-darwin-debug-full.tar.zst.sha256"
};
const PYPI_MIRROR = {
settingId: "Comfy-Desktop.UV.PypiInstallMirror",
mirror: "https://pypi.org/simple/",
fallbackMirror: "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
};
export {
PYTHON_MIRROR as P,
PYPI_MIRROR as a
};
//# sourceMappingURL=uvMirrors-B-HKMf6X.js.map
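The validationPathSuffix above points at a small checksum file, which suggests the primary mirror is probed for reachability before use, with fallbackMirror as the alternative. A minimal sketch of such a probe using only the Python standard library (the pick_mirror helper is hypothetical, not part of this commit):

import urllib.request

PRIMARY = "https://github.com/astral-sh/python-build-standalone/releases/download"
FALLBACK = "https://bgithub.xyz/astral-sh/python-build-standalone/releases/download"
SUFFIX = "/20250115/cpython-3.10.16+20250115-aarch64-apple-darwin-debug-full.tar.zst.sha256"

def pick_mirror(primary: str, fallback: str, suffix: str, timeout: float = 5.0) -> str:
    # Probe a known small artifact on the primary mirror; any network or
    # HTTP error falls through to the fallback mirror.
    try:
        with urllib.request.urlopen(primary + suffix, timeout=timeout) as resp:
            if resp.status == 200:
                return primary
    except OSError:
        pass
    return fallback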

30
comfy/web/index.html vendored
View File

@ -1,15 +1,15 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>ComfyUI</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<link rel="stylesheet" type="text/css" href="user.css" />
<link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
<script type="module" crossorigin src="./assets/index-BsGgXmrT.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-ChXzdVeQ.css">
</head>
<body class="litegraph grid">
<div id="vue-app"></div>
</body>
</html>
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>ComfyUI</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<link rel="stylesheet" type="text/css" href="user.css" />
<link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
<script type="module" crossorigin src="./assets/index-Bv0b06LE.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-CBxvvAzM.css">
</head>
<body class="litegraph grid">
<div id="vue-app"></div>
</body>
</html>

2
comfy/web/scripts/domWidget.js vendored Normal file
View File

@ -0,0 +1,2 @@
// Shim for scripts/domWidget.ts
export const DOMWidgetImpl = window.comfyAPI.domWidget.DOMWidgetImpl;

View File

@ -330,7 +330,7 @@
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"v1-5-pruned-emaonly.safetensors"
"v1-5-pruned-emaonly-fp16.safetensors"
]
}
],
@ -440,8 +440,8 @@
"extra": {},
"version": 0.4,
"models": [{
"name": "v1-5-pruned-emaonly.safetensors",
"url": "https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly.safetensors?download=true",
"name": "v1-5-pruned-emaonly-fp16.safetensors",
"url": "https://huggingface.co/Comfy-Org/stable-diffusion-v1-5-archive/resolve/main/v1-5-pruned-emaonly-fp16.safetensors?download=true",
"directory": "checkpoints"
}]
}

View File

@ -23,10 +23,7 @@ class Load3D():
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
}}
RETURN_TYPES = ("IMAGE", "MASK", "STRING")
@ -38,22 +35,14 @@ class Load3D():
CATEGORY = "3d"
def process(self, model_file, image, **kwargs):
if isinstance(image, dict):
image_path = folder_paths.get_annotated_filepath(image['image'])
mask_path = folder_paths.get_annotated_filepath(image['mask'])
image_path = folder_paths.get_annotated_filepath(image['image'])
mask_path = folder_paths.get_annotated_filepath(image['mask'])
load_image_node = nodes.LoadImage()
output_image, ignore_mask = load_image_node.load_image(image=image_path)
ignore_image, output_mask = load_image_node.load_image(image=mask_path)
load_image_node = nodes.LoadImage()
output_image, ignore_mask = load_image_node.load_image(image=image_path)
ignore_image, output_mask = load_image_node.load_image(image=mask_path)
return output_image, output_mask, model_file,
else:
# The frontend may pass a plain path instead of a dict while its code is not yet
# compatible with core, so double-check here; this fallback can be removed once
# the frontend is merged into the core.
image_path = folder_paths.get_annotated_filepath(image)
load_image_node = nodes.LoadImage()
output_image, output_mask = load_image_node.load_image(image=image_path)
return output_image, output_mask, model_file,
return output_image, output_mask, model_file,
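For context, the image value the 3D editor widget sends to process is a dict holding separate annotated filepaths for the rendered image and its mask; an illustrative payload (filenames hypothetical, the "[temp]" annotation follows folder_paths conventions):

image = {
    "image": "3d/scene_preview.png [temp]",
    "mask": "3d/scene_mask.png [temp]",
}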
class Load3DAnimation():
@ -71,11 +60,7 @@ class Load3DAnimation():
"width": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
"material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"animation_speed": (["0.1", "0.5", "1", "1.5", "2"], {"default": "1"}),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
}}
RETURN_TYPES = ("IMAGE", "MASK", "STRING")
@ -87,20 +72,14 @@ class Load3DAnimation():
CATEGORY = "3d"
def process(self, model_file, image, **kwargs):
if isinstance(image, dict):
image_path = folder_paths.get_annotated_filepath(image['image'])
mask_path = folder_paths.get_annotated_filepath(image['mask'])
image_path = folder_paths.get_annotated_filepath(image['image'])
mask_path = folder_paths.get_annotated_filepath(image['mask'])
load_image_node = nodes.LoadImage()
output_image, ignore_mask = load_image_node.load_image(image=image_path)
ignore_image, output_mask = load_image_node.load_image(image=mask_path)
load_image_node = nodes.LoadImage()
output_image, ignore_mask = load_image_node.load_image(image=image_path)
ignore_image, output_mask = load_image_node.load_image(image=mask_path)
return output_image, output_mask, model_file,
else:
image_path = folder_paths.get_annotated_filepath(image)
load_image_node = nodes.LoadImage()
output_image, output_mask = load_image_node.load_image(image=image_path)
return output_image, output_mask, model_file,
return output_image, output_mask, model_file,
class Preview3D():
@ -109,10 +88,27 @@ class Preview3D():
return {"required": {
"model_file": ("STRING", {"default": "", "multiline": False}),
"material": (["original", "normal", "wireframe", "depth"],),
"bg_color": ("STRING", {"default": "#000000", "multiline": False}),
"light_intensity": ("INT", {"default": 10, "min": 1, "max": 20, "step": 1}),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
"fov": ("INT", {"default": 75, "min": 10, "max": 150, "step": 1}),
}}
OUTPUT_NODE = True
RETURN_TYPES = ()
CATEGORY = "3d"
FUNCTION = "process"
EXPERIMENTAL = True
def process(self, model_file, **kwargs):
return {"ui": {"model_file": [model_file]}, "result": ()}
class Preview3DAnimation():
@classmethod
def INPUT_TYPES(s):
return {"required": {
"model_file": ("STRING", {"default": "", "multiline": False}),
"material": (["original", "normal", "wireframe", "depth"],),
"up_direction": (["original", "-x", "+x", "-y", "+y", "-z", "+z"],),
}}
OUTPUT_NODE = True
@ -130,11 +126,13 @@ class Preview3D():
NODE_CLASS_MAPPINGS = {
"Load3D": Load3D,
"Load3DAnimation": Load3DAnimation,
"Preview3D": Preview3D
"Preview3D": Preview3D,
"Preview3DAnimation": Preview3DAnimation
}
NODE_DISPLAY_NAME_MAPPINGS = {
"Load3D": "Load 3D",
"Load3DAnimation": "Load 3D - Animation",
"Preview3D": "Preview 3D"
"Preview3D": "Preview 3D",
"Preview3DAnimation": "Preview 3D - Animation"
}

View File

@ -2,6 +2,8 @@ import comfy.sd
import comfy.model_sampling
import comfy.latent_formats
import torch
import node_helpers
from comfy.nodes.common import MAX_RESOLUTION
@ -302,6 +304,24 @@ class RescaleCFG:
m.set_model_sampler_cfg_function(rescale_cfg)
return (m, )
class ModelComputeDtype:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"dtype": (["default", "fp32", "fp16", "bf16"],),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
CATEGORY = "advanced/debug/model"
def patch(self, model, dtype):
m = model.clone()
m.set_model_compute_dtype(node_helpers.string_to_torch_dtype(dtype))
return (m, )
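node_helpers.string_to_torch_dtype presumably maps the dtype strings above to torch dtypes, with "default" falling through so the model keeps its usual compute dtype. A plausible sketch of that mapping, not the actual helper:

import torch

def string_to_torch_dtype(string):
    return {
        "fp32": torch.float32,
        "fp16": torch.float16,
        "bf16": torch.bfloat16,
    }.get(string)  # None for "default": leave the compute dtype unchanged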
NODE_CLASS_MAPPINGS = {
"ModelSamplingDiscrete": ModelSamplingDiscrete,
"ModelSamplingContinuousEDM": ModelSamplingContinuousEDM,
@ -311,4 +331,5 @@ NODE_CLASS_MAPPINGS = {
"ModelSamplingAuraFlow": ModelSamplingAuraFlow,
"ModelSamplingFlux": ModelSamplingFlux,
"RescaleCFG": RescaleCFG,
"ModelComputeDtype": ModelComputeDtype,
}

View File

@ -205,6 +205,54 @@ class ModelMergeLTXV(nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict}
class ModelMergeCosmos7B(nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = {"model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["pos_embedder."] = argument
arg_dict["extra_pos_embedder."] = argument
arg_dict["x_embedder."] = argument
arg_dict["t_embedder."] = argument
arg_dict["affline_norm."] = argument
for i in range(28):
arg_dict["blocks.block{}.".format(i)] = argument
arg_dict["final_layer."] = argument
return {"required": arg_dict}
class ModelMergeCosmos14B(nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific"
@classmethod
def INPUT_TYPES(s):
arg_dict = {"model1": ("MODEL",),
"model2": ("MODEL",)}
argument = ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})
arg_dict["pos_embedder."] = argument
arg_dict["extra_pos_embedder."] = argument
arg_dict["x_embedder."] = argument
arg_dict["t_embedder."] = argument
arg_dict["affline_norm."] = argument
for i in range(36):
arg_dict["blocks.block{}.".format(i)] = argument
arg_dict["final_layer."] = argument
return {"required": arg_dict}
NODE_CLASS_MAPPINGS = {
"ModelMergeSD1": ModelMergeSD1,
"ModelMergeSD2": ModelMergeSD1, # SD1 and SD2 have the same blocks
@ -215,4 +263,6 @@ NODE_CLASS_MAPPINGS = {
"ModelMergeSD35_Large": ModelMergeSD35_Large,
"ModelMergeMochiPreview": ModelMergeMochiPreview,
"ModelMergeLTXV": ModelMergeLTXV,
"ModelMergeCosmos7B": ModelMergeCosmos7B,
"ModelMergeCosmos14B": ModelMergeCosmos14B,
}

View File

@ -0,0 +1,104 @@
from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
import torch
class RenormCFG:
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"cfg_trunc": ("FLOAT", {"default": 100, "min": 0.0, "max": 100.0, "step": 0.01}),
"renorm_cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0, "step": 0.01}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
CATEGORY = "advanced/model"
def patch(self, model, cfg_trunc, renorm_cfg):
def renorm_cfg_func(args):
cond_denoised = args["cond_denoised"]
uncond_denoised = args["uncond_denoised"]
cond_scale = args["cond_scale"]
timestep = args["timestep"]
x_orig = args["input"]
in_channels = model.model.diffusion_model.in_channels
if timestep[0] < cfg_trunc:
cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps)
half_rest = cond_rest
if float(renorm_cfg) > 0.0:
ori_pos_norm = torch.linalg.vector_norm(cond_eps
, dim=tuple(range(1, len(cond_eps.shape))), keepdim=True
)
max_new_norm = ori_pos_norm * float(renorm_cfg)
new_pos_norm = torch.linalg.vector_norm(
half_eps, dim=tuple(range(1, len(half_eps.shape))), keepdim=True
)
if new_pos_norm >= max_new_norm:
half_eps = half_eps * (max_new_norm / new_pos_norm)
else:
cond_eps, uncond_eps = cond_denoised[:, :in_channels], uncond_denoised[:, :in_channels]
cond_rest, _ = cond_denoised[:, in_channels:], uncond_denoised[:, in_channels:]
half_eps = cond_eps
half_rest = cond_rest
cfg_result = torch.cat([half_eps, half_rest], dim=1)
# cfg_result = uncond_denoised + (cond_denoised - uncond_denoised) * cond_scale
return x_orig - cfg_result
m = model.clone()
m.set_model_sampler_cfg_function(renorm_cfg_func)
return (m, )
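The renorm step caps the norm of the guided prediction at renorm_cfg times the norm of the conditional prediction, taming CFG overshoot at high scales while preserving direction. A standalone toy run of the same arithmetic (shapes and values arbitrary):

import torch

cond_eps = torch.randn(1, 4, 8, 8)
uncond_eps = torch.randn(1, 4, 8, 8)
cond_scale, renorm_cfg = 7.0, 1.0

half_eps = uncond_eps + cond_scale * (cond_eps - uncond_eps)
max_norm = torch.linalg.vector_norm(cond_eps, dim=(1, 2, 3), keepdim=True) * renorm_cfg
new_norm = torch.linalg.vector_norm(half_eps, dim=(1, 2, 3), keepdim=True)
if new_norm >= max_norm:
    half_eps = half_eps * (max_norm / new_norm)  # rescaled, direction preserved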
class CLIPTextEncodeLumina2(ComfyNodeABC):
SYSTEM_PROMPT = {
"superior": "You are an assistant designed to generate superior images with the superior "\
"degree of image-text alignment based on textual prompts or user prompts.",
"alignment": "You are an assistant designed to generate high-quality images with the "\
"highest degree of image-text alignment based on textual prompts."
}
SYSTEM_PROMPT_TIP = "Lumina2 provide two types of system prompts:" \
"Superior: You are an assistant designed to generate superior images with the superior "\
"degree of image-text alignment based on textual prompts or user prompts. "\
"Alignment: You are an assistant designed to generate high-quality images with the highest "\
"degree of image-text alignment based on textual prompts."
@classmethod
def INPUT_TYPES(s) -> InputTypeDict:
return {
"required": {
"system_prompt": (list(CLIPTextEncodeLumina2.SYSTEM_PROMPT.keys()), {"tooltip": CLIPTextEncodeLumina2.SYSTEM_PROMPT_TIP}),
"user_prompt": (IO.STRING, {"multiline": True, "dynamicPrompts": True, "tooltip": "The text to be encoded."}),
"clip": (IO.CLIP, {"tooltip": "The CLIP model used for encoding the text."})
}
}
RETURN_TYPES = (IO.CONDITIONING,)
OUTPUT_TOOLTIPS = ("A conditioning containing the embedded text used to guide the diffusion model.",)
FUNCTION = "encode"
CATEGORY = "conditioning"
DESCRIPTION = "Encodes a system prompt and a user prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
def encode(self, clip, user_prompt, system_prompt):
if clip is None:
raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
system_prompt = CLIPTextEncodeLumina2.SYSTEM_PROMPT[system_prompt]
prompt = f'{system_prompt} <Prompt Start> {user_prompt}'
tokens = clip.tokenize(prompt)
return (clip.encode_from_tokens_scheduled(tokens), )
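For reference, the string handed to clip.tokenize() is a plain concatenation around a <Prompt Start> separator; with the "superior" system prompt and a user prompt of "a red fox" it comes out as follows (illustrative only):

system = CLIPTextEncodeLumina2.SYSTEM_PROMPT["superior"]
prompt = f"{system} <Prompt Start> a red fox"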
NODE_CLASS_MAPPINGS = {
"CLIPTextEncodeLumina2": CLIPTextEncodeLumina2,
"RenormCFG": RenormCFG
}
NODE_DISPLAY_NAME_MAPPINGS = {
"CLIPTextEncodeLumina2": "CLIP Text Encode for Lumina2",
}

View File

@ -0,0 +1,75 @@
import os
import av
import torch
import folder_paths
import json
from fractions import Fraction
class SaveWEBM:
def __init__(self):
self.output_dir = folder_paths.get_output_directory()
self.type = "output"
self.prefix_append = ""
@classmethod
def INPUT_TYPES(s):
return {"required":
{"images": ("IMAGE", ),
"filename_prefix": ("STRING", {"default": "ComfyUI"}),
"codec": (["vp9", "av1"],),
"fps": ("FLOAT", {"default": 24.0, "min": 0.01, "max": 1000.0, "step": 0.01}),
"crf": ("FLOAT", {"default": 32.0, "min": 0, "max": 63.0, "step": 1, "tooltip": "Higher crf means lower quality with a smaller file size, lower crf means higher quality higher filesize."}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ()
FUNCTION = "save_images"
OUTPUT_NODE = True
CATEGORY = "image/video"
EXPERIMENTAL = True
def save_images(self, images, codec, fps, filename_prefix, crf, prompt=None, extra_pnginfo=None):
filename_prefix += self.prefix_append
full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0])
file = f"{filename}_{counter:05}_.webm"
container = av.open(os.path.join(full_output_folder, file), mode="w")
if prompt is not None:
container.metadata["prompt"] = json.dumps(prompt)
if extra_pnginfo is not None:
for x in extra_pnginfo:
container.metadata[x] = json.dumps(extra_pnginfo[x])
codec_map = {"vp9": "libvpx-vp9", "av1": "libaom-av1"}
stream = container.add_stream(codec_map[codec], rate=Fraction(round(fps * 1000), 1000))
stream.width = images.shape[-2]
stream.height = images.shape[-3]
stream.pix_fmt = "yuv420p"
stream.bit_rate = 0
stream.options = {'crf': str(crf)}
for frame in images:
frame = av.VideoFrame.from_ndarray(torch.clamp(frame[..., :3] * 255, min=0, max=255).to(device=torch.device("cpu"), dtype=torch.uint8).numpy(), format="rgb24")
for packet in stream.encode(frame):
container.mux(packet)
container.close()
results = [{
"filename": file,
"subfolder": subfolder,
"type": self.type
}]
return {"ui": {"images": results, "animated": (True,)}} # TODO: frontend side
NODE_CLASS_MAPPINGS = {
"SaveWEBM": SaveWEBM,
}

View File

@ -13,7 +13,8 @@ torchinfo
safetensors>=0.4.2
bitsandbytes>=0.43.0 ;platform_system != 'Darwin'
bitsandbytes ;platform_system == 'Darwin'
aiohttp>=3.8.4
aiohttp>=3.11.8
yarl>=1.18.0
accelerate>=0.25.0
pyyaml>=6.0
scikit-image>=0.20.0
@ -73,4 +74,5 @@ humanize
lightning
flax
jax
colour
colour
av

View File

@ -23,7 +23,7 @@ package_name = "comfyui"
"""
The current version.
"""
version = "0.3.11"
version = "0.3.15"
"""
The package index to the torch built with AMD ROCm.

View File

@ -26,7 +26,18 @@ def temp_dir():
yield tmpdirname
def test_get_directory_by_type():
@pytest.fixture
def set_base_dir():
fn = FolderNames()
with context_folder_names_and_paths(fn):
def _set_base_dir(base_dir):
fn.base_paths.clear()
fn.add_base_path(Path(base_dir))
yield _set_base_dir
def test_get_directory_by_type(clear_folder_paths):
test_dir = "/test/dir"
folder_paths.set_output_directory(test_dir)
assert folder_paths.get_directory_by_type("output") == test_dir
@ -118,3 +129,49 @@ def test_add_output_path_absolute(temp_dir):
assert len(mp.additional_absolute_directory_paths) == 0
assert len(mp.additional_relative_directory_paths) == 1
assert list(mp.additional_relative_directory_paths)[0] == (Path("output") / "diffusion_models")
def test_base_path_changes(set_base_dir):
test_dir = os.path.abspath("/test/dir")
set_base_dir(test_dir)
assert folder_paths.base_path == test_dir
assert folder_paths.models_dir == os.path.join(test_dir, "models")
assert folder_paths.input_directory == os.path.join(test_dir, "input")
assert folder_paths.output_directory == os.path.join(test_dir, "output")
assert folder_paths.temp_directory == os.path.join(test_dir, "temp")
assert folder_paths.user_directory == os.path.join(test_dir, "user")
assert os.path.join(test_dir, "custom_nodes") in folder_paths.get_folder_paths("custom_nodes")
for name in ["checkpoints", "loras", "vae", "configs", "embeddings", "controlnet", "classifiers"]:
assert folder_paths.get_folder_paths(name)[0] == os.path.join(test_dir, "models", name)
def test_base_path_change_clears_old(set_base_dir):
test_dir = os.path.abspath("/test/dir")
set_base_dir(test_dir)
assert len(folder_paths.get_folder_paths("custom_nodes")) == 1
single_model_paths = [
"checkpoints",
"loras",
"vae",
"configs",
"clip_vision",
"style_models",
"diffusers",
"vae_approx",
"gligen",
"upscale_models",
"embeddings",
"hypernetworks",
"photomaker",
"classifiers",
]
for name in single_model_paths:
assert len(folder_paths.get_folder_paths(name)) == 1
for name in ["controlnet", "diffusion_models", "text_encoders"]:
assert len(folder_paths.get_folder_paths(name)) == 2

View File

@ -1,115 +0,0 @@
import pytest
from aiohttp import web
from unittest.mock import MagicMock, patch
from comfy.api_server.routes.internal.internal_routes import InternalRoutes
from comfy.api_server.services.file_service import FileService
from comfy.cmd.folder_paths import models_dir, user_directory, output_directory
@pytest.fixture
def internal_routes():
return InternalRoutes(None)
@pytest.fixture
def aiohttp_client_factory(aiohttp_client, internal_routes):
async def _get_client():
app = internal_routes.get_app()
return await aiohttp_client(app)
return _get_client
@pytest.mark.asyncio
async def test_list_files_valid_directory(aiohttp_client_factory, internal_routes):
mock_file_list = [
{"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
{"name": "dir1", "path": "dir1", "type": "directory"}
]
internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=models')
assert resp.status == 200
data = await resp.json()
assert 'files' in data
assert len(data['files']) == 2
assert data['files'] == mock_file_list
# Check other valid directories
resp = await client.get('/files?directory=user')
assert resp.status == 200
resp = await client.get('/files?directory=output')
assert resp.status == 200
@pytest.mark.asyncio
async def test_list_files_invalid_directory(aiohttp_client_factory, internal_routes):
internal_routes.file_service.list_files = MagicMock(side_effect=ValueError("Invalid directory key"))
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=invalid')
assert resp.status == 400
data = await resp.json()
assert 'error' in data
assert data['error'] == "Invalid directory key"
@pytest.mark.asyncio
async def test_list_files_exception(aiohttp_client_factory, internal_routes):
internal_routes.file_service.list_files = MagicMock(side_effect=Exception("Unexpected error"))
client = await aiohttp_client_factory()
resp = await client.get('/files?directory=models')
assert resp.status == 500
data = await resp.json()
assert 'error' in data
assert data['error'] == "Unexpected error"
@pytest.mark.asyncio
async def test_list_files_no_directory_param(aiohttp_client_factory, internal_routes):
mock_file_list = []
internal_routes.file_service.list_files = MagicMock(return_value=mock_file_list)
client = await aiohttp_client_factory()
resp = await client.get('/files')
assert resp.status == 200
data = await resp.json()
assert 'files' in data
assert len(data['files']) == 0
def test_setup_routes(internal_routes):
internal_routes.setup_routes()
routes = internal_routes.routes
assert any(route.method == 'GET' and str(route.path) == '/files' for route in routes)
def test_get_app(internal_routes):
app = internal_routes.get_app()
assert isinstance(app, web.Application)
assert internal_routes._app is not None
def test_get_app_reuse(internal_routes):
app1 = internal_routes.get_app()
app2 = internal_routes.get_app()
assert app1 is app2
@pytest.mark.asyncio
async def test_routes_added_to_app(aiohttp_client_factory, internal_routes):
client = await aiohttp_client_factory()
try:
resp = await client.get('/files')
print(f"Response received: status {resp.status}") # noqa: T201
except Exception as e:
print(f"Exception occurred during GET request: {e}") # noqa: T201
raise
assert resp.status != 404, "Route /files does not exist"
@pytest.mark.asyncio
async def test_file_service_initialization():
with patch('comfy.api_server.routes.internal.internal_routes.FileService') as MockFileService:
# Create a mock instance
mock_file_service_instance = MagicMock(spec=FileService)
MockFileService.return_value = mock_file_service_instance
internal_routes = InternalRoutes(None)
# Check if FileService was initialized with the correct parameters
MockFileService.assert_called_once_with({
"models": models_dir,
"user": user_directory,
"output": output_directory
})
# Verify that the file_service attribute of InternalRoutes is set
assert internal_routes.file_service == mock_file_service_instance

View File

@ -1,62 +0,0 @@
from unittest.mock import MagicMock
import pytest
from comfy.api_server.services.file_service import FileService
@pytest.fixture
def mock_file_system_ops():
return MagicMock()
@pytest.fixture
def file_service(mock_file_system_ops):
allowed_directories = {
"models": "/path/to/models",
"user": "/path/to/user",
"output": "/path/to/output"
}
return FileService(allowed_directories, file_system_ops=mock_file_system_ops)
def test_list_files_valid_directory(file_service, mock_file_system_ops):
mock_file_system_ops.walk_directory.return_value = [
{"name": "file1.txt", "path": "file1.txt", "type": "file", "size": 100},
{"name": "dir1", "path": "dir1", "type": "directory"}
]
result = file_service.list_files("models")
assert len(result) == 2
assert result[0]["name"] == "file1.txt"
assert result[1]["name"] == "dir1"
mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")
def test_list_files_invalid_directory(file_service):
# Does not support walking directories outside of the allowed directories
with pytest.raises(ValueError, match="Invalid directory key"):
file_service.list_files("invalid_key")
def test_list_files_empty_directory(file_service, mock_file_system_ops):
mock_file_system_ops.walk_directory.return_value = []
result = file_service.list_files("models")
assert len(result) == 0
mock_file_system_ops.walk_directory.assert_called_once_with("/path/to/models")
@pytest.mark.parametrize("directory_key", ["models", "user", "output"])
def test_list_files_all_allowed_directories(file_service, mock_file_system_ops, directory_key):
mock_file_system_ops.walk_directory.return_value = [
{"name": f"file_{directory_key}.txt", "path": f"file_{directory_key}.txt", "type": "file", "size": 100}
]
result = file_service.list_files(directory_key)
assert len(result) == 1
assert result[0]["name"] == f"file_{directory_key}.txt"
mock_file_system_ops.walk_directory.assert_called_once_with(f"/path/to/{directory_key}")

View File

@ -118,7 +118,7 @@ def test_load_extra_model_paths_expands_userpath(
mock_yaml_safe_load.assert_called_once()
# Check if open was called with the correct file path
mock_file.assert_called_once_with(dummy_yaml_file_name, 'r')
mock_file.assert_called_once_with(dummy_yaml_file_name, 'r', encoding='utf-8')
@patch('builtins.open', new_callable=mock_open)
@ -149,7 +149,7 @@ def test_load_extra_model_paths_expands_appdata(
else:
expected_base_path = '/Users/TestUser/AppData/Roaming/ComfyUI'
expected_calls = [
('checkpoints', os.path.join(expected_base_path, 'models/checkpoints'), False),
('checkpoints', os.path.normpath(os.path.join(expected_base_path, 'models/checkpoints')), False),
]
assert mock_add_model_folder_path.call_count == len(expected_calls)