update docker image

This commit is contained in:
doctorpangloss 2025-07-17 12:45:59 -07:00
parent ac8e5aee7d
commit d709e10bcb
4 changed files with 60 additions and 36 deletions

View File

@ -31,7 +31,32 @@ RUN uv pip uninstall --system $(pip list --format=freeze | grep opencv) && \
rm -rf /usr/local/lib/python3.12/dist-packages/cv2/ && \
uv pip install --no-build-isolation opencv-python-headless
RUN uv pip install --overrides=numpy-override.txt "comfyui[attention,comfyui_manager]@git+https://github.com/hiddenswitch/ComfyUI.git"
# this exotic command will determine the correct torchaudio to install for the image
RUN <<-EOF
python -c 'import torch, re, subprocess
torch_version_full = torch.__version__
torch_ver_match = re.match(r"(\d+\.\d+\.\d+)", torch_version_full)
if not torch_ver_match:
raise ValueError(f"Could not parse torch version from {torch_version_full}")
torch_ver = torch_ver_match.group(1)
cuda_ver_tag = f"cu{torch.version.cuda.replace(".", "")}"
command = [
"uv", "pip", "install", "--no-deps", "--overrides=numpy-override.txt",
f"torchaudio=={torch_ver}+{cuda_ver_tag}",
"--extra-index-url", f"https://download.pytorch.org/whl/{cuda_ver_tag}",
]
subprocess.run(command, check=True)'
EOF
# sources for building this dockerfile
# use these lines to build from the local fs
# ADD . /src
# ARG SOURCES=/src
# this builds from github
ARG SOURCES="comfyui[attention,comfyui_manager]@git+https://github.com/hiddenswitch/ComfyUI.git"
ENV SOURCES=$SOURCES
RUN uv pip install --overrides=numpy-override.txt $SOURCES
WORKDIR /workspace
# addresses https://github.com/pytorch/pytorch/issues/104801
@ -39,4 +64,4 @@ WORKDIR /workspace
RUN comfyui --quick-test-for-ci --cpu --cwd /workspace
EXPOSE 8188
CMD ["python", "-m", "comfy.cmd.main", "--listen"]
CMD ["python", "-m", "comfy.cmd.main", "--listen", "--use-sage-attention", "--reserve-vram=0", "--logging-level=INFO", "--enable-cors"]

View File

@ -53,6 +53,13 @@ def on_flush(callback):
stderr_interceptor.on_flush(callback)
class StackTraceLogger(logging.Logger):
    """Logger subclass that attaches a stack trace to every WARNING-or-higher record."""

    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, stacklevel=1):
        # For warnings and errors, always capture the call stack so the log
        # shows where the message originated; lower levels keep the caller's choice.
        include_stack = stack_info or level >= logging.WARNING
        super()._log(
            level,
            msg,
            args,
            exc_info=exc_info,
            extra=extra,
            stack_info=include_stack,
            # +1 so the reported caller is the code that invoked the logging
            # call, not this override frame.
            stacklevel=stacklevel + 1,
        )
def setup_logger(log_level: str = 'INFO', capacity: int = 300, use_stdout: bool = False):
global logs
if logs:
@ -71,6 +78,7 @@ def setup_logger(log_level: str = 'INFO', capacity: int = 300, use_stdout: bool
stderr_interceptor = sys.stderr = LogInterceptor(sys.stderr)
# Setup default global logger
logging.setLoggerClass(StackTraceLogger)
logger = logging.getLogger()
logger.setLevel(log_level)

View File

@ -7,7 +7,15 @@ Use this instead of cli_args to import the args:
It will enable command line argument parsing. If this isn't desired, you must author your own implementation of these fixes.
"""
import ctypes
import importlib.util
import logging
import os
import shutil
import warnings
from .. import options
from ..app import logger
os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
@ -18,32 +26,6 @@ os.environ["NO_ALBUMENTATIONS_UPDATE"] = "1"
os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1'
os.environ['DO_NOT_TRACK'] = '1'
if 'OTEL_METRICS_EXPORTER' not in os.environ:
os.environ['OTEL_METRICS_EXPORTER'] = 'none'
import ctypes
import importlib.util
import logging
import shutil
import sys
import warnings
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.aio_pika import AioPikaInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.semconv.attributes import service_attributes
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
from .. import options
from ..app import logger
from ..tracing_compatibility import ProgressSpanSampler
from ..tracing_compatibility import patch_spanbuilder_set_channel
from ..vendor.aiohttp_server_instrumentation import AioHttpServerInstrumentor
this_logger = logging.getLogger(__name__)
options.enable_args_parsing()
@ -114,6 +96,19 @@ def _fix_pytorch_240():
def _create_tracer():
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.aio_pika import AioPikaInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.semconv.attributes import service_attributes
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
from ..tracing_compatibility import ProgressSpanSampler
from ..tracing_compatibility import patch_spanbuilder_set_channel
from ..vendor.aiohttp_server_instrumentation import AioHttpServerInstrumentor
resource = Resource.create({
service_attributes.SERVICE_NAME: args.otel_service_name,
service_attributes.SERVICE_VERSION: args.otel_service_version,
@ -133,22 +128,18 @@ def _create_tracer():
processor = BatchSpanProcessor(otlp_exporter)
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
# enable instrumentation
patch_spanbuilder_set_channel()
AioPikaInstrumentor().instrument()
AioHttpServerInstrumentor().instrument()
RequestsInstrumentor().instrument()
return trace.get_tracer(args.otel_service_name)
# makes this behave better as a library
return trace.get_tracer(args.otel_service_name, tracer_provider=provider)
def _configure_logging():
logging_level = args.logging_level
if len(args.workflows) > 0 or args.distributed_queue_worker or args.distributed_queue_frontend or args.distributed_queue_connection_uri is not None:
logging.basicConfig(level=logging_level, stream=sys.stderr)
else:
logger.setup_logger(logging_level)
logger.setup_logger(logging_level)
_configure_logging()

View File

@ -27,7 +27,7 @@ dependencies = [
"torchsde>=0.2.6",
"einops>=0.6.0",
"open-clip-torch>=2.24.0",
"transformers>=4.29.1",
"transformers!=4.53.0,!=4.53.1,!=4.53.2",
"tokenizers>=0.13.3",
"sentencepiece",
"peft>=0.10.0",