feat(entrypoint): dynamic CUDA arch detection, first-run override, fix git clone, clarify Sage Attention flags

Compute TORCH_CUDA_ARCH_LIST from torch.cuda device properties to build
for the exact GPUs present, improving correctness across mixed setups.

Add first-run dependency install gate with a COMFY_AUTO_INSTALL=1
override to re-run installs on later boots without removing the flag.

Use `python -m pip` consistently with `--no-cache-dir` to avoid stale
wheels and reduce container bloat during rebuilds.

Fix git clone commands to standard HTTPS (no Markdown link syntax) and
use shallow fetch/reset against origin/HEAD for speed and reliability.

Clarify Sage Attention flags: set SAGE_ATTENTION_AVAILABLE only when the
module is importable; require FORCE_SAGE_ATTENTION=1 to enable at boot.

Keep readable GPU logs via `nvidia-smi`, while relying on torch for
compile-time arch targeting. Improve logging throughout the flow.
This commit is contained in:
clsferguson 2025-09-26 12:10:28 -06:00 committed by GitHub
parent f2f351d235
commit 13f3f11431
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -10,6 +10,7 @@ CUSTOM_NODES_DIR="$BASE_DIR/custom_nodes"
SAGE_ATTENTION_DIR="$BASE_DIR/.sage_attention" SAGE_ATTENTION_DIR="$BASE_DIR/.sage_attention"
SAGE_ATTENTION_BUILT_FLAG="$SAGE_ATTENTION_DIR/.built" SAGE_ATTENTION_BUILT_FLAG="$SAGE_ATTENTION_DIR/.built"
PERMISSIONS_SET_FLAG="$BASE_DIR/.permissions_set" PERMISSIONS_SET_FLAG="$BASE_DIR/.permissions_set"
FIRST_RUN_FLAG="$BASE_DIR/.first_run_complete"
# Function to log with timestamp # Function to log with timestamp
log() { log() {
@ -24,19 +25,18 @@ import sys
if not torch.cuda.is_available(): if not torch.cuda.is_available():
print('[ERROR] PyTorch CUDA not available') print('[ERROR] PyTorch CUDA not available')
sys.exit(1) sys.exit(1)
device_count = torch.cuda.device_count() device_count = torch.cuda.device_count()
print(f'[TEST] PyTorch CUDA available with {device_count} devices') print(f'[TEST] PyTorch CUDA available with {device_count} devices')
for i in range(device_count): for i in range(device_count):
props = torch.cuda.get_device_properties(i) props = torch.cuda.get_device_properties(i)
print(f'[TEST] GPU {i}: {props.name} (Compute {props.major}.{props.minor})') print(f'[TEST] GPU {i}: {props.name} (Compute {props.major}.{props.minor})')
" 2>/dev/null " 2>/dev/null
} }
# Function to detect all GPUs and their generations # Function to detect all GPUs and their generations (best-effort labels)
detect_gpu_generations() { detect_gpu_generations() {
local gpu_info=$(nvidia-smi --query-gpu=name --format=csv,noheader,nounits 2>/dev/null || echo "") local gpu_info
gpu_info=$(nvidia-smi --query-gpu=name --format=csv,noheader,nounits 2>/dev/null || echo "")
local has_rtx20=false local has_rtx20=false
local has_rtx30=false local has_rtx30=false
local has_rtx40=false local has_rtx40=false
@ -52,24 +52,14 @@ detect_gpu_generations() {
while IFS= read -r gpu; do while IFS= read -r gpu; do
gpu_count=$((gpu_count + 1)) gpu_count=$((gpu_count + 1))
log " GPU $gpu_count: $gpu" log " GPU $gpu_count: $gpu"
case "$gpu" in case "$gpu" in
*"RTX 20"*|*"2060"*|*"2070"*|*"2080"*|*"2090"*) *"RTX 20"*|*"2060"*|*"2070"*|*"2080"*|*"2090"*) has_rtx20=true ;;
has_rtx20=true *"RTX 30"*|*"3060"*|*"3070"*|*"3080"*|*"3090"*) has_rtx30=true ;;
;; *"RTX 40"*|*"4060"*|*"4070"*|*"4080"*|*"4090"*) has_rtx40=true ;;
*"RTX 30"*|*"3060"*|*"3070"*|*"3080"*|*"3090"*) *"RTX 50"*|*"5060"*|*"5070"*|*"5080"*|*"5090"*) has_rtx50=true ;;
has_rtx30=true
;;
*"RTX 40"*|*"4060"*|*"4070"*|*"4080"*|*"4090"*)
has_rtx40=true
;;
*"RTX 50"*|*"5060"*|*"5070"*|*"5080"*|*"5090"*)
has_rtx50=true
;;
esac esac
done <<< "$gpu_info" done <<< "$gpu_info"
# Store detection results globally
export DETECTED_RTX20=$has_rtx20 export DETECTED_RTX20=$has_rtx20
export DETECTED_RTX30=$has_rtx30 export DETECTED_RTX30=$has_rtx30
export DETECTED_RTX40=$has_rtx40 export DETECTED_RTX40=$has_rtx40
@ -78,7 +68,6 @@ detect_gpu_generations() {
log "Detection summary: RTX20=$has_rtx20, RTX30=$has_rtx30, RTX40=$has_rtx40, RTX50=$has_rtx50" log "Detection summary: RTX20=$has_rtx20, RTX30=$has_rtx30, RTX40=$has_rtx40, RTX50=$has_rtx50"
# Test PyTorch CUDA compatibility
if test_pytorch_cuda; then if test_pytorch_cuda; then
log "PyTorch CUDA compatibility confirmed" log "PyTorch CUDA compatibility confirmed"
else else
@ -90,19 +79,18 @@ detect_gpu_generations() {
determine_sage_strategy() { determine_sage_strategy() {
local strategy="" local strategy=""
# Mixed generation logic - prioritize compatibility over peak performance if [ "${DETECTED_RTX20:-false}" = "true" ]; then
if [ "$DETECTED_RTX20" = "true" ]; then if [ "${DETECTED_RTX30:-false}" = "true" ] || [ "${DETECTED_RTX40:-false}" = "true" ] || [ "${DETECTED_RTX50:-false}" = "true" ]; then
if [ "$DETECTED_RTX30" = "true" ] || [ "$DETECTED_RTX40" = "true" ] || [ "$DETECTED_RTX50" = "true" ]; then
strategy="mixed_with_rtx20" strategy="mixed_with_rtx20"
log "Mixed GPU setup detected with RTX 20 series - using compatibility mode" log "Mixed GPU setup detected with RTX 20 series - using compatibility mode"
else else
strategy="rtx20_only" strategy="rtx20_only"
log "RTX 20 series only detected" log "RTX 20 series only detected"
fi fi
elif [ "$DETECTED_RTX50" = "true" ]; then elif [ "${DETECTED_RTX50:-false}" = "true" ]; then
strategy="rtx50_capable" strategy="rtx50_capable"
log "RTX 50 series detected - using latest optimizations" log "RTX 50 series detected - using latest optimizations"
elif [ "$DETECTED_RTX40" = "true" ] || [ "$DETECTED_RTX30" = "true" ]; then elif [ "${DETECTED_RTX40:-false}" = "true" ] || [ "${DETECTED_RTX30:-false}" = "true" ]; then
strategy="rtx30_40_optimized" strategy="rtx30_40_optimized"
log "RTX 30/40 series detected - using standard optimizations" log "RTX 30/40 series detected - using standard optimizations"
else else
@ -118,22 +106,22 @@ install_triton_version() {
case "$SAGE_STRATEGY" in case "$SAGE_STRATEGY" in
"mixed_with_rtx20"|"rtx20_only") "mixed_with_rtx20"|"rtx20_only")
log "Installing Triton 3.2.0 for RTX 20 series compatibility" log "Installing Triton 3.2.0 for RTX 20 series compatibility"
python -m pip install --user --force-reinstall "triton==3.2.0" || { python -m pip install --no-cache-dir --user --force-reinstall "triton==3.2.0" || {
log "WARNING: Failed to install specific Triton version, using default" log "WARNING: Failed to install specific Triton version, using default"
python -m pip install --user --force-reinstall triton || true python -m pip install --no-cache-dir --user --force-reinstall triton || true
} }
;; ;;
"rtx50_capable") "rtx50_capable")
log "Installing latest Triton for RTX 50 series" log "Installing latest Triton for RTX 50 series"
python -m pip install --user --force-reinstall triton || \ python -m pip install --no-cache-dir --user --force-reinstall triton || \
python -m pip install --user --force-reinstall --pre triton || { python -m pip install --no-cache-dir --user --force-reinstall --pre triton || {
log "WARNING: Failed to install latest Triton, using stable" log "WARNING: Failed to install latest Triton, using stable >=3.2.0"
python -m pip install --user --force-reinstall "triton>=3.2.0" || true python -m pip install --no-cache-dir --user --force-reinstall "triton>=3.2.0" || true
} }
;; ;;
*) *)
log "Installing latest stable Triton" log "Installing latest stable Triton"
python -m pip install --user --force-reinstall triton || { python -m pip install --no-cache-dir --user --force-reinstall triton || {
log "WARNING: Triton installation failed, continuing without" log "WARNING: Triton installation failed, continuing without"
return 1 return 1
} }
@ -141,29 +129,35 @@ install_triton_version() {
esac esac
} }
# Compute a semicolon-separated TORCH_CUDA_ARCH_LIST (e.g. "7.5;8.6") from the
# compute capabilities of the GPUs torch can actually see.
# Outputs: arch list on stdout; an empty line when CUDA is unavailable.
# Returns: python's exit status (non-zero if torch is missing or broken).
#          stderr is deliberately suppressed: callers treat any failure as
#          "fall back to PyTorch's default arch targets".
compute_cuda_arch_list() {
  python - <<'PY' 2>/dev/null
import torch

caps = set()
if torch.cuda.is_available():
    for i in range(torch.cuda.device_count()):
        p = torch.cuda.get_device_properties(i)
        caps.add((p.major, p.minor))
# Sort numerically: a lexical sort would order "12.0" before "7.5".
print(";".join(f"{major}.{minor}" for major, minor in sorted(caps)))
PY
}
# Function to build Sage Attention with architecture-specific optimizations # Function to build Sage Attention with architecture-specific optimizations
build_sage_attention_mixed() { build_sage_attention_mixed() {
log "Building Sage Attention for mixed GPU environment..." log "Building Sage Attention for current GPU environment..."
# Create sage attention directory
mkdir -p "$SAGE_ATTENTION_DIR" mkdir -p "$SAGE_ATTENTION_DIR"
cd "$SAGE_ATTENTION_DIR" cd "$SAGE_ATTENTION_DIR"
# Set CUDA architecture list based on detected GPUs local cuda_arch_list
local cuda_arch_list="" cuda_arch_list="$(compute_cuda_arch_list || true)"
[ "$DETECTED_RTX20" = "true" ] && cuda_arch_list="${cuda_arch_list}7.5;" if [ -n "${cuda_arch_list:-}" ]; then
[ "$DETECTED_RTX30" = "true" ] && cuda_arch_list="${cuda_arch_list}8.6;" export TORCH_CUDA_ARCH_LIST="$cuda_arch_list"
[ "$DETECTED_RTX40" = "true" ] && cuda_arch_list="${cuda_arch_list}8.9;" log "Set TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST"
[ "$DETECTED_RTX50" = "true" ] && cuda_arch_list="${cuda_arch_list}12.0;" else
log "Could not infer TORCH_CUDA_ARCH_LIST from torch; proceeding with PyTorch defaults"
fi
# Remove trailing semicolon
cuda_arch_list=${cuda_arch_list%;}
# Export for PyTorch build
export TORCH_CUDA_ARCH_LIST="$cuda_arch_list"
log "Set TORCH_CUDA_ARCH_LIST=$TORCH_CUDA_ARCH_LIST"
# Clone or update repository based on strategy
case "$SAGE_STRATEGY" in case "$SAGE_STRATEGY" in
"mixed_with_rtx20"|"rtx20_only") "mixed_with_rtx20"|"rtx20_only")
log "Cloning Sage Attention v1.0 for RTX 20 series compatibility" log "Cloning Sage Attention v1.0 for RTX 20 series compatibility"
@ -183,7 +177,7 @@ build_sage_attention_mixed() {
if [ -d "SageAttention/.git" ]; then if [ -d "SageAttention/.git" ]; then
cd SageAttention cd SageAttention
git fetch --depth 1 origin || return 1 git fetch --depth 1 origin || return 1
git reset --hard origin/main || return 1 git reset --hard origin/HEAD || return 1
else else
rm -rf SageAttention rm -rf SageAttention
git clone --depth 1 https://github.com/thu-ml/SageAttention.git || return 1 git clone --depth 1 https://github.com/thu-ml/SageAttention.git || return 1
@ -192,10 +186,8 @@ build_sage_attention_mixed() {
;; ;;
esac esac
# Build with architecture-specific flags using --user installation log "Building Sage Attention..."
log "Building Sage Attention with multi-GPU support..." if MAX_JOBS=$(nproc) python -m pip install --no-cache-dir --user --no-build-isolation .; then
if MAX_JOBS=$(nproc) python -m pip install --user --no-build-isolation .; then
# Create strategy-specific built flag
echo "$SAGE_STRATEGY" > "$SAGE_ATTENTION_BUILT_FLAG" echo "$SAGE_STRATEGY" > "$SAGE_ATTENTION_BUILT_FLAG"
log "Sage Attention built successfully for strategy: $SAGE_STRATEGY" log "Sage Attention built successfully for strategy: $SAGE_STRATEGY"
cd "$BASE_DIR" cd "$BASE_DIR"
@ -210,17 +202,15 @@ build_sage_attention_mixed() {
# Function to check if current build matches detected GPUs # Function to check if current build matches detected GPUs
needs_rebuild() { needs_rebuild() {
if [ ! -f "$SAGE_ATTENTION_BUILT_FLAG" ]; then if [ ! -f "$SAGE_ATTENTION_BUILT_FLAG" ]; then
return 0 # Needs build return 0
fi fi
local built_strategy local built_strategy
built_strategy=$(cat "$SAGE_ATTENTION_BUILT_FLAG" 2>/dev/null || echo "unknown") built_strategy=$(cat "$SAGE_ATTENTION_BUILT_FLAG" 2>/dev/null || echo "unknown")
if [ "$built_strategy" != "$SAGE_STRATEGY" ]; then if [ "$built_strategy" != "$SAGE_STRATEGY" ]; then
log "GPU configuration changed (was: $built_strategy, now: $SAGE_STRATEGY) - rebuild needed" log "GPU configuration changed (was: $built_strategy, now: $SAGE_STRATEGY) - rebuild needed"
return 0 # Needs rebuild return 0
fi fi
return 1
return 1 # No rebuild needed
} }
# Function to check if Sage Attention is working # Function to check if Sage Attention is working
@ -230,7 +220,6 @@ import sys
try: try:
import sageattention import sageattention
print('[TEST] Sage Attention import: SUCCESS') print('[TEST] Sage Attention import: SUCCESS')
# Try to get version info
try: try:
if hasattr(sageattention, '__version__'): if hasattr(sageattention, '__version__'):
print(f'[TEST] Version: {sageattention.__version__}') print(f'[TEST] Version: {sageattention.__version__}')
@ -248,9 +237,9 @@ except Exception as e:
# Main GPU detection and Sage Attention setup # Main GPU detection and Sage Attention setup
setup_sage_attention() { setup_sage_attention() {
# DO NOT set SAGE_ATTENTION_AVAILABLE here; respect any user-provided env choice # Internal tracking and exported availability flag
# Track build status separately for logging/visibility
export SAGE_ATTENTION_BUILT=0 export SAGE_ATTENTION_BUILT=0
export SAGE_ATTENTION_AVAILABLE=0
# Detect GPU generations # Detect GPU generations
if ! detect_gpu_generations; then if ! detect_gpu_generations; then
@ -266,24 +255,25 @@ setup_sage_attention() {
log "Building Sage Attention..." log "Building Sage Attention..."
if install_triton_version && build_sage_attention_mixed && test_sage_attention; then if install_triton_version && build_sage_attention_mixed && test_sage_attention; then
export SAGE_ATTENTION_BUILT=1 export SAGE_ATTENTION_BUILT=1
log "Sage Attention is built and available; enable by setting SAGE_ATTENTION_AVAILABLE=1 or using --use-sage-attention explicitly" export SAGE_ATTENTION_AVAILABLE=1
log "Sage Attention is built and importable; enable at boot by setting FORCE_SAGE_ATTENTION=1"
else else
export SAGE_ATTENTION_BUILT=0 export SAGE_ATTENTION_BUILT=0
export SAGE_ATTENTION_AVAILABLE=0
log "WARNING: Sage Attention is not available after build attempt" log "WARNING: Sage Attention is not available after build attempt"
fi fi
else else
export SAGE_ATTENTION_BUILT=1 export SAGE_ATTENTION_BUILT=1
export SAGE_ATTENTION_AVAILABLE=1
log "Sage Attention already built and importable for current GPU configuration" log "Sage Attention already built and importable for current GPU configuration"
fi fi
} }
# If running as root, handle permissions and user mapping # If running as root, handle permissions and user mapping
if [ "$(id -u)" = "0" ]; then if [ "$(id -u)" = "0" ]; then
# Check if permissions are already set
if [ ! -f "$PERMISSIONS_SET_FLAG" ]; then if [ ! -f "$PERMISSIONS_SET_FLAG" ]; then
log "Setting up user permissions..." log "Setting up user permissions..."
# Map group to PGID if it already exists, otherwise remap the named group
if getent group "${PGID}" >/dev/null; then if getent group "${PGID}" >/dev/null; then
EXISTING_GRP="$(getent group "${PGID}" | cut -d: -f1)" EXISTING_GRP="$(getent group "${PGID}" | cut -d: -f1)"
usermod -g "${EXISTING_GRP}" "${APP_USER}" || true usermod -g "${EXISTING_GRP}" "${APP_USER}" || true
@ -292,16 +282,13 @@ if [ "$(id -u)" = "0" ]; then
groupmod -o -g "${PGID}" "${APP_GROUP}" || true groupmod -o -g "${PGID}" "${APP_GROUP}" || true
fi fi
# Map user to PUID
usermod -o -u "${PUID}" "${APP_USER}" || true usermod -o -u "${PUID}" "${APP_USER}" || true
# Ensure home and app dir exist and are owned
mkdir -p "/home/${APP_USER}" mkdir -p "/home/${APP_USER}"
for d in "$BASE_DIR" "/home/$APP_USER"; do for d in "$BASE_DIR" "/home/$APP_USER"; do
[ -e "$d" ] && chown -R "${APP_USER}:${APP_GROUP}" "$d" || true [ -e "$d" ] && chown -R "${APP_USER}:${APP_GROUP}" "$d" || true
done done
# Make Python system install targets writable for the runtime user (only under /usr/local)
readarray -t PY_PATHS < <(python - <<'PY' readarray -t PY_PATHS < <(python - <<'PY'
import sys, sysconfig, os, datetime import sys, sysconfig, os, datetime
def log(msg): def log(msg):
@ -326,7 +313,6 @@ log("Finished emitting target directories")
PY PY
) )
# Make directories writable
for d in "${PY_PATHS[@]}"; do for d in "${PY_PATHS[@]}"; do
case "$d" in case "$d" in
/usr/local|/usr/local/*) /usr/local|/usr/local/*)
@ -338,7 +324,6 @@ PY
esac esac
done done
# Create permissions set flag
touch "$PERMISSIONS_SET_FLAG" touch "$PERMISSIONS_SET_FLAG"
chown "${APP_USER}:${APP_GROUP}" "$PERMISSIONS_SET_FLAG" chown "${APP_USER}:${APP_GROUP}" "$PERMISSIONS_SET_FLAG"
log "User permissions configured" log "User permissions configured"
@ -346,7 +331,6 @@ PY
log "User permissions already configured, skipping..." log "User permissions already configured, skipping..."
fi fi
# Re-exec as the runtime user
exec runuser -u "${APP_USER}" -- "$0" "$@" exec runuser -u "${APP_USER}" -- "$0" "$@"
fi fi
@ -366,40 +350,52 @@ fi
# User-site PATHs for --user installs (custom nodes) # User-site PATHs for --user installs (custom nodes)
export PATH="$HOME/.local/bin:$PATH" export PATH="$HOME/.local/bin:$PATH"
pyver="$(python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')" pyver="$(python -c 'import sys; print(f\"{sys.version_info.major}.{sys.version_info.minor}\")')"
export PYTHONPATH="$HOME/.local/lib/python${pyver}/site-packages:${PYTHONPATH:-}" export PYTHONPATH="$HOME/.local/lib/python${pyver}/site-packages:${PYTHONPATH:-}"
# Auto-install custom node deps # First-run detection for custom node deps (with override)
if [ "${COMFY_AUTO_INSTALL:-1}" = "1" ]; then RUN_NODE_INSTALL=0
log "Scanning custom nodes for requirements..." if [ ! -f "$FIRST_RUN_FLAG" ]; then
# Install any requirements*.txt found under custom_nodes (upgrade within constraints) RUN_NODE_INSTALL=1
while IFS= read -r -d '' req; do log "First run detected: installing custom node dependencies"
log "pip install --user --upgrade -r $req" elif [ "${COMFY_AUTO_INSTALL:-0}" = "1" ]; then
pip install --no-cache-dir --user --upgrade --upgrade-strategy only-if-needed -r "$req" || true RUN_NODE_INSTALL=1
done < <(find "$CUSTOM_NODES_DIR" -maxdepth 3 -type f \( -iname 'requirements.txt' -o -iname 'requirements-*.txt' -o -path '*/requirements/*.txt' \) -print0) log "COMFY_AUTO_INSTALL=1: forcing custom node dependency install"
else
# For pyproject.toml-based nodes, EXCLUDE ComfyUI-Manager (it's not meant to be wheel-built) log "Not first run and COMFY_AUTO_INSTALL!=1: skipping custom node dependency install"
while IFS= read -r -d '' pjt; do
d="$(dirname "$pjt")"
log "pip install --user . in $d"
(cd "$d" && pip install --no-cache-dir --user .) || true
done < <(find "$CUSTOM_NODES_DIR" -maxdepth 2 -type f -iname 'pyproject.toml' -not -path '*/ComfyUI-Manager/*' -print0)
pip check || true
fi fi
# Build ComfyUI command with Sage Attention flag only if user explicitly enabled it via env if [ "$RUN_NODE_INSTALL" = "1" ]; then
log "Scanning custom nodes for requirements..."
while IFS= read -r -d '' req; do
log "python -m pip install --user --upgrade -r $req"
python -m pip install --no-cache-dir --user --upgrade --upgrade-strategy only-if-needed -r "$req" || true
done < <(find "$CUSTOM_NODES_DIR" -maxdepth 3 -type f \( -iname 'requirements.txt' -o -iname 'requirements-*.txt' -o -path '*/requirements/*.txt' \) -print0)
while IFS= read -r -d '' pjt; do
d="$(dirname "$pjt")"
log "python -m pip install --user . in $d"
(cd "$d" && python -m pip install --no-cache-dir --user .) || true
done < <(find "$CUSTOM_NODES_DIR" -maxdepth 2 -type f -iname 'pyproject.toml' -not -path '*/ComfyUI-Manager/*' -print0)
python -m pip check || true
# Mark first run complete (or keep flag if already set)
touch "$FIRST_RUN_FLAG" || true
fi
# Build ComfyUI command with Sage Attention flag only if forced
COMFYUI_ARGS="" COMFYUI_ARGS=""
if [ "${SAGE_ATTENTION_AVAILABLE:-0}" = "1" ]; then if [ "${FORCE_SAGE_ATTENTION:-0}" = "1" ]; then
if test_sage_attention; then if test_sage_attention; then
COMFYUI_ARGS="--use-sage-attention" COMFYUI_ARGS="--use-sage-attention"
log "Starting ComfyUI with Sage Attention enabled by environment (SAGE_ATTENTION_AVAILABLE=1)" log "Starting ComfyUI with Sage Attention forced by environment (FORCE_SAGE_ATTENTION=1)"
else else
log "WARNING: SAGE_ATTENTION_AVAILABLE=1 but Sage Attention import failed; starting without" log "WARNING: FORCE_SAGE_ATTENTION=1 but Sage Attention import failed; starting without"
fi fi
else else
if [ "${SAGE_ATTENTION_BUILT:-0}" = "1" ]; then if [ "${SAGE_ATTENTION_AVAILABLE:-0}" = "1" ]; then
log "Sage Attention is built and available; set SAGE_ATTENTION_AVAILABLE=1 to enable it" log "Sage Attention is built and available; set FORCE_SAGE_ATTENTION=1 to enable it on boot"
else else
log "Sage Attention not available; starting without it" log "Sage Attention not available; starting without it"
fi fi
@ -409,16 +405,12 @@ cd "$BASE_DIR"
# Handle both direct execution and passed arguments # Handle both direct execution and passed arguments
if [ $# -eq 0 ]; then if [ $# -eq 0 ]; then
# No arguments passed, use default
exec python main.py --listen 0.0.0.0 $COMFYUI_ARGS exec python main.py --listen 0.0.0.0 $COMFYUI_ARGS
else else
# Arguments were passed, check if it's the default command if [ "${1:-}" = "python" ] && [ "${2:-}" = "main.py" ]; then
if [ "$1" = "python" ] && [ "${2:-}" = "main.py" ]; then shift 2
# Default python command, add our args
shift 2 # Remove 'python main.py'
exec python main.py $COMFYUI_ARGS "$@" exec python main.py $COMFYUI_ARGS "$@"
else else
# Custom command, pass through as-is
exec "$@" exec "$@"
fi fi
fi fi