Update ComfyUI

- use their logger when running interactively
- move the extra nodes files to where this fork expects them
 - add the mochi checkpoints to known models
 - add a mochi workflow test
This commit is contained in:
doctorpangloss 2024-11-18 13:58:24 -08:00
parent c0f072ee0f
commit fb7a3f9386
11 changed files with 210 additions and 65 deletions

View File

@@ -4,7 +4,9 @@ from aiohttp import web
from ...services.file_service import FileService from ...services.file_service import FileService
from ...services.terminal_service import TerminalService from ...services.terminal_service import TerminalService
from ....cmd.folder_paths import models_dir, user_directory, output_directory, folder_names_and_paths # pylint: disable=import-error from ....app import logger
from ....cmd.folder_paths import models_dir, user_directory, output_directory, \
folder_names_and_paths # pylint: disable=import-error
class InternalRoutes: class InternalRoutes:
@@ -40,16 +42,13 @@ class InternalRoutes:
@self.routes.get('/logs') @self.routes.get('/logs')
async def get_logs(request): async def get_logs(request):
return web.json_response({}) return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in logger.get_logs()]))
# todo: enable logs
# return web.json_response("".join([(l["t"] + " - " + l["m"]) for l in app.logger.get_logs()]))
@self.routes.get('/logs/raw') @self.routes.get('/logs/raw')
async def get_logs(request): async def get_logs(request):
self.terminal_service.update_size() self.terminal_service.update_size()
return web.json_response({ return web.json_response({
# todo: enable logs "entries": list(logger.get_logs()),
# "entries": list(app.logger.get_logs()),
"size": {"cols": self.terminal_service.cols, "rows": self.terminal_service.rows} "size": {"cols": self.terminal_service.cols, "rows": self.terminal_service.rows}
}) })
@@ -65,7 +64,6 @@ class InternalRoutes:
return web.Response(status=200) return web.Response(status=200)
@self.routes.get('/folder_paths') @self.routes.get('/folder_paths')
async def get_folder_paths(request): async def get_folder_paths(request):
response = {} response = {}

View File

@@ -1,6 +1,8 @@
from typing import Dict, List, Optional from typing import Dict, List, Optional
from ..utils.file_operations import FileSystemOperations, FileSystemItem from ..utils.file_operations import FileSystemOperations, FileSystemItem
class FileService: class FileService:
def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None): def __init__(self, allowed_directories: Dict[str, str], file_system_ops: Optional[FileSystemOperations] = None):
self.allowed_directories: Dict[str, str] = allowed_directories self.allowed_directories: Dict[str, str] = allowed_directories
@@ -10,4 +12,4 @@ class FileService:
if directory_key not in self.allowed_directories: if directory_key not in self.allowed_directories:
raise ValueError("Invalid directory key") raise ValueError("Invalid directory key")
directory_path: str = self.allowed_directories[directory_key] directory_path: str = self.allowed_directories[directory_key]
return self.file_system_ops.walk_directory(directory_path) return self.file_system_ops.walk_directory(directory_path)

View File

@@ -1,6 +1,7 @@
from app.logger import on_flush
import os import os
from ...app.logger import on_flush
class TerminalService: class TerminalService:
def __init__(self, server): def __init__(self, server):
@@ -15,7 +16,7 @@ class TerminalService:
changed = False changed = False
if sz.columns != self.cols: if sz.columns != self.cols:
self.cols = sz.columns self.cols = sz.columns
changed = True changed = True
if sz.lines != self.rows: if sz.lines != self.rows:
self.rows = sz.lines self.rows = sz.lines
@@ -35,10 +36,10 @@ class TerminalService:
def send_messages(self, entries): def send_messages(self, entries):
if not len(entries) or not len(self.subscriptions): if not len(entries) or not len(self.subscriptions):
return return
new_size = self.update_size() new_size = self.update_size()
for client_id in self.subscriptions.copy(): # prevent: Set changed size during iteration for client_id in self.subscriptions.copy(): # prevent: Set changed size during iteration
if client_id not in self.server.sockets: if client_id not in self.server.sockets:
# Automatically unsub if the socket has disconnected # Automatically unsub if the socket has disconnected
self.unsubscribe(client_id) self.unsubscribe(client_id)

View File

@@ -1,5 +1,6 @@
import os
import json import json
import os
from aiohttp import web from aiohttp import web
@@ -51,4 +52,4 @@ class AppSettings():
settings = self.get_settings(request) settings = self.get_settings(request)
settings[setting_id] = await request.json() settings[setting_id] = await request.json()
self.save_settings(request, settings) self.save_settings(request, settings)
return web.Response(status=200) return web.Response(status=200)

View File

@@ -25,6 +25,7 @@ from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
from opentelemetry.semconv.resource import ResourceAttributes as ResAttrs from opentelemetry.semconv.resource import ResourceAttributes as ResAttrs
from .. import options from .. import options
from ..app import logger
from ..tracing_compatibility import ProgressSpanSampler from ..tracing_compatibility import ProgressSpanSampler
from ..tracing_compatibility import patch_spanbuilder_set_channel from ..tracing_compatibility import patch_spanbuilder_set_channel
from ..vendor.aiohttp_server_instrumentation import AioHttpServerInstrumentor from ..vendor.aiohttp_server_instrumentation import AioHttpServerInstrumentor
@@ -35,6 +36,7 @@ if os.name == "nt":
warnings.filterwarnings("ignore", message="torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.") warnings.filterwarnings("ignore", message="torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.")
warnings.filterwarnings("ignore", message="Torch was not compiled with flash attention.") warnings.filterwarnings("ignore", message="Torch was not compiled with flash attention.")
warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*") warnings.filterwarnings("ignore", message=".*Torch was not compiled with flash attention.*")
warnings.filterwarnings('ignore', category=FutureWarning, message=r'`torch\.cuda\.amp\.custom_fwd.*')
from ..cli_args import args from ..cli_args import args
@@ -118,7 +120,10 @@ def _create_tracer():
def _configure_logging(): def _configure_logging():
logging_level = args.logging_level logging_level = args.logging_level
logging.basicConfig(level=logging_level) if args.distributed_queue_worker or args.distributed_queue_frontend or args.distributed_queue_connection_uri is not None:
logging.basicConfig(level=logging_level)
else:
logger.setup_logger(logging_level)
_configure_logging() _configure_logging()

View File

@@ -415,6 +415,7 @@ KNOWN_VAES: Final[KnownDownloadables] = KnownDownloadables([
HuggingFile("stabilityai/sdxl-vae", "sdxl_vae.safetensors"), HuggingFile("stabilityai/sdxl-vae", "sdxl_vae.safetensors"),
HuggingFile("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors"), HuggingFile("stabilityai/sd-vae-ft-mse-original", "vae-ft-mse-840000-ema-pruned.safetensors"),
HuggingFile("black-forest-labs/FLUX.1-schnell", "ae.safetensors"), HuggingFile("black-forest-labs/FLUX.1-schnell", "ae.safetensors"),
HuggingFile("Comfy-Org/mochi_preview_repackaged", "split_files/vae/mochi_vae.safetensors"),
], folder_name="vae") ], folder_name="vae")
KNOWN_HUGGINGFACE_MODEL_REPOS: Final[Set[str]] = { KNOWN_HUGGINGFACE_MODEL_REPOS: Final[Set[str]] = {
@@ -434,12 +435,15 @@ KNOWN_UNET_MODELS: Final[KnownDownloadables] = KnownDownloadables([
HuggingFile("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors"), HuggingFile("black-forest-labs/FLUX.1-dev", "flux1-dev.safetensors"),
HuggingFile("Kijai/flux-fp8", "flux1-dev-fp8.safetensors"), HuggingFile("Kijai/flux-fp8", "flux1-dev-fp8.safetensors"),
HuggingFile("Kijai/flux-fp8", "flux1-schnell-fp8.safetensors"), HuggingFile("Kijai/flux-fp8", "flux1-schnell-fp8.safetensors"),
HuggingFile("Comfy-Org/mochi_preview_repackaged", "split_files/diffusion_models/mochi_preview_bf16.safetensors"),
HuggingFile("Comfy-Org/mochi_preview_repackaged", "split_files/diffusion_models/mochi_preview_fp8_scaled.safetensors"),
], folder_name="diffusion_models") ], folder_name="diffusion_models")
KNOWN_CLIP_MODELS: Final[KnownDownloadables] = KnownDownloadables([ KNOWN_CLIP_MODELS: Final[KnownDownloadables] = KnownDownloadables([
# todo: is this correct? # todo: is this correct?
HuggingFile("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors"), HuggingFile("comfyanonymous/flux_text_encoders", "t5xxl_fp16.safetensors"),
HuggingFile("comfyanonymous/flux_text_encoders", "t5xxl_fp8_e4m3fn.safetensors"), HuggingFile("comfyanonymous/flux_text_encoders", "t5xxl_fp8_e4m3fn.safetensors"),
HuggingFile("Comfy-Org/mochi_preview_repackaged", "split_files/text_encoders/t5xxl_fp8_e4m3fn_scaled.safetensors"),
HuggingFile("stabilityai/stable-diffusion-3-medium", "text_encoders/clip_g.safetensors"), HuggingFile("stabilityai/stable-diffusion-3-medium", "text_encoders/clip_g.safetensors"),
HuggingFile("comfyanonymous/flux_text_encoders", "clip_l.safetensors", save_with_filename="clip_l.safetensors"), HuggingFile("comfyanonymous/flux_text_encoders", "clip_l.safetensors", save_with_filename="clip_l.safetensors"),
# uses names from https://comfyanonymous.github.io/ComfyUI_examples/audio/ # uses names from https://comfyanonymous.github.io/ComfyUI_examples/audio/

85
comfy/web/index.html vendored
View File

@@ -1,43 +1,42 @@
<!DOCTYPE html> <!DOCTYPE html>
<html lang="en"> <html lang="en">
<head> <head>
<meta charset="UTF-8"> <meta charset="UTF-8">
<title>ComfyUI</title> <title>ComfyUI</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no"> <meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<link rel="stylesheet" type="text/css" href="user.css" /> <link rel="stylesheet" type="text/css" href="user.css" />
<link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" /> <link rel="stylesheet" type="text/css" href="materialdesignicons.min.css" />
<script type="module" crossorigin src="./assets/index-BNX_XOqh.js"></script> <script type="module" crossorigin src="./assets/index-bi78Y1IN.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-HT1vecxT.css"> <link rel="stylesheet" crossorigin href="./assets/index-BObQyxMu.css">
</head> </head>
<body class="litegraph grid"> <body class="litegraph grid">
<div id="vue-app"></div> <div id="vue-app"></div>
<div id="comfy-user-selection" class="comfy-user-selection" style="display: none;"> <div id="comfy-user-selection" class="comfy-user-selection" style="display: none;">
<main class="comfy-user-selection-inner"> <main class="comfy-user-selection-inner">
<h1>ComfyUI</h1> <h1>ComfyUI</h1>
<form> <form>
<section> <section>
<label>New user: <label>New user:
<input placeholder="Enter a username" /> <input placeholder="Enter a username" />
</label> </label>
</section> </section>
<div class="comfy-user-existing"> <div class="comfy-user-existing">
<span class="or-separator">OR</span> <span class="or-separator">OR</span>
<section> <section>
<label> <label>
Existing user: Existing user:
<select> <select>
<option hidden disabled selected value> Select a user </option> <option hidden disabled selected value> Select a user </option>
</select> </select>
</label> </label>
</section> </section>
</div> </div>
<footer> <footer>
<span class="comfy-user-error">&nbsp;</span> <span class="comfy-user-error">&nbsp;</span>
<button class="comfy-btn comfy-user-button-next">Next</button> <button class="comfy-btn comfy-user-button-next">Next</button>
</footer> </footer>
</form> </form>
</main> </main>
</div> </div>
</body>
</body> </html>
</html>

View File

@@ -80,7 +80,7 @@ class ModelMergeSD3_2B(nodes_model_merging.ModelMergeBlocks):
return {"required": arg_dict} return {"required": arg_dict}
class ModelMergeAuraflow(comfy_extras.nodes_model_merging.ModelMergeBlocks): class ModelMergeAuraflow(nodes_model_merging.ModelMergeBlocks):
CATEGORY = "advanced/model_merging/model_specific" CATEGORY = "advanced/model_merging/model_specific"
@classmethod @classmethod

View File

@@ -1,6 +1,7 @@
import re
import comfy.model_patcher import comfy.model_patcher
import comfy.samplers import comfy.samplers
import re
class SkipLayerGuidanceDiT: class SkipLayerGuidanceDiT:
@@ -9,15 +10,17 @@ class SkipLayerGuidanceDiT:
Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377) Inspired by Perturbed Attention Guidance (https://arxiv.org/abs/2403.17377)
Original experimental implementation for SD3 by Dango233@StabilityAI. Original experimental implementation for SD3 by Dango233@StabilityAI.
''' '''
@classmethod @classmethod
def INPUT_TYPES(s): def INPUT_TYPES(s):
return {"required": {"model": ("MODEL", ), return {"required": {"model": ("MODEL",),
"double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), "double_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
"single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}), "single_layers": ("STRING", {"default": "7, 8, 9", "multiline": False}),
"scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}), "scale": ("FLOAT", {"default": 3.0, "min": 0.0, "max": 10.0, "step": 0.1}),
"start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}), "start_percent": ("FLOAT", {"default": 0.01, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001}) "end_percent": ("FLOAT", {"default": 0.15, "min": 0.0, "max": 1.0, "step": 0.001})
}} }}
RETURN_TYPES = ("MODEL",) RETURN_TYPES = ("MODEL",)
FUNCTION = "skip_guidance" FUNCTION = "skip_guidance"
EXPERIMENTAL = True EXPERIMENTAL = True
@@ -42,7 +45,7 @@ class SkipLayerGuidanceDiT:
single_layers = [int(i) for i in single_layers] single_layers = [int(i) for i in single_layers]
if len(double_layers) == 0 and len(single_layers) == 0: if len(double_layers) == 0 and len(single_layers) == 0:
return (model, ) return (model,)
def post_cfg_function(args): def post_cfg_function(args):
model = args["model"] model = args["model"]
@@ -70,7 +73,7 @@ class SkipLayerGuidanceDiT:
m = model.clone() m = model.clone()
m.set_model_sampler_post_cfg_function(post_cfg_function) m.set_model_sampler_post_cfg_function(post_cfg_function)
return (m, ) return (m,)
NODE_CLASS_MAPPINGS = { NODE_CLASS_MAPPINGS = {

View File

@@ -0,0 +1,132 @@
{
"3": {
"inputs": {
"seed": 309794859719915,
"steps": 30,
"cfg": 4.5,
"sampler_name": "euler",
"scheduler": "simple",
"denoise": 1,
"model": [
"37",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"21",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"6": {
"inputs": {
"text": "a fox moving quickly in a beautiful winter scenery nature trees sunset tracking camera",
"clip": [
"38",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "",
"clip": [
"38",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"39",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"21": {
"inputs": {
"width": 848,
"height": 480,
"length": 37,
"batch_size": 1
},
"class_type": "EmptyMochiLatentVideo",
"_meta": {
"title": "EmptyMochiLatentVideo"
}
},
"28": {
"inputs": {
"filename_prefix": "ComfyUI",
"fps": 24,
"lossless": false,
"quality": 80,
"method": "default",
"images": [
"8",
0
]
},
"class_type": "SaveAnimatedWEBP",
"_meta": {
"title": "SaveAnimatedWEBP"
}
},
"37": {
"inputs": {
"unet_name": "mochi_preview_bf16.safetensors",
"weight_dtype": "default"
},
"class_type": "UNETLoader",
"_meta": {
"title": "Load Diffusion Model"
}
},
"38": {
"inputs": {
"clip_name": "t5xxl_fp16.safetensors",
"type": "mochi"
},
"class_type": "CLIPLoader",
"_meta": {
"title": "Load CLIP"
}
},
"39": {
"inputs": {
"vae_name": "mochi_vae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
}
}