Fix Python linting issues and enhance RunPod network storage

- Fix all whitespace and formatting issues in runpod_handler.py
- Remove trailing whitespace and blank line whitespace
- Add comprehensive RunPod network storage setup
- Enhance start_runpod.py with automatic model mounting
- Update Dockerfile for optimized RunPod deployment
- Add detailed setup documentation in runpod_setup.md
- Improve .dockerignore for faster builds
- Add .env to .gitignore for security
Bahadir Ciloglu 2025-11-01 16:24:00 +03:00
parent 91494226db
commit e054031606
15 changed files with 856 additions and 27 deletions

.dockerignore

@@ -1,19 +1,79 @@
# Git
.git
.github
__pycache__
*.pyc
*.pyo
*.pyd
.Python
*.so
*.egg
*.egg-info
dist
build
.vscode
.idea
.gitignore
.gitattributes
# Documentation
*.md
output
input
models/checkpoints/*.safetensors
models/checkpoints/*.ckpt
docs/
# Development files
.env
.env.local
.env.example
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
# Virtual environments
venv/
env/
ENV/
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Logs
*.log
logs/
# Temporary files
temp/
tmp/
*.tmp
# Large model files (will be mounted from network storage)
models/*/
!models/.gitkeep
# Output files
output/
outputs/
# Cache
.cache/
cache/
# Node modules (if any)
node_modules/
# Docker
Dockerfile.*
docker-compose*.yml

5 .gitignore vendored

@@ -24,3 +24,8 @@ web_custom_versions/
openapi.yaml
filtered-openapi.yaml
uv.lock
# Environment files with secrets
.env
.env.local
.env.production


@@ -39,12 +39,12 @@ class ComfyUIServerlessHandler:
        """Start ComfyUI server in background"""
        try:
            logger.info("Starting ComfyUI server...")
            # Check if main.py exists
            if not os.path.exists("/workspace/ComfyUI/main.py"):
                logger.error("main.py not found in /workspace/ComfyUI")
                raise FileNotFoundError("ComfyUI main.py not found")
            # Check if models directory exists (network storage)
            if not os.path.exists("/workspace/ComfyUI/models"):
                logger.warning("Models directory not found, creating symlink to network storage")
@@ -52,7 +52,7 @@ class ComfyUIServerlessHandler:
                os.symlink("/runpod-volume/models", "/workspace/ComfyUI/models")
            else:
                logger.error("Network storage models not found at /runpod-volume/models")
            # Start ComfyUI
            self.comfyui_process = subprocess.Popen([
                "python3", "main.py",
@@ -60,12 +60,12 @@ class ComfyUIServerlessHandler:
                "--port", "8000",
                "--dont-print-server",
                "--disable-auto-launch"
            ], cwd="/workspace/ComfyUI",
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            logger.info("ComfyUI server started")
        except Exception as e:
            logger.error(f"Failed to start ComfyUI: {str(e)}")
            raise

Dockerfile

@@ -18,9 +18,16 @@ RUN pip install --no-cache-dir \
    torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu && \
    pip install --no-cache-dir -r requirements.txt
# Use the Cloud Run PORT environment variable
# Make the RunPod startup script executable
RUN chmod +x start_runpod.py
# Environment variables
ENV PORT=8188
ENV RUNPOD_NETWORK_STORAGE_PATH=/runpod-volume
ENV HF_HUB_DISABLE_TELEMETRY=1
ENV DO_NOT_TRACK=1
EXPOSE 8188
# Start - run in CPU mode
CMD python main.py --listen 0.0.0.0 --port ${PORT} --cpu
# Use the RunPod startup script
CMD ["python", "start_runpod.py"]

93 Dockerfile.serverless Normal file

@@ -0,0 +1,93 @@
# RunPod Serverless ComfyUI Worker
FROM runpod/pytorch:2.2.0-py3.11-cuda12.1.1-devel-ubuntu22.04
# Set working directory
WORKDIR /workspace
# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    wget \
    curl \
    unzip \
    ffmpeg \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libglib2.0-0 \
    libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/*
# Install Python dependencies
RUN pip install --no-cache-dir \
    runpod \
    requests \
    pillow \
    numpy \
    torch \
    torchvision \
    torchaudio \
    xformers \
    accelerate \
    transformers \
    diffusers \
    opencv-python \
    scipy \
    scikit-image
# Clone ComfyUI
RUN git clone https://github.com/bahadirciloglu/ComfyUI.git /workspace/ComfyUI
# Set ComfyUI as working directory
WORKDIR /workspace/ComfyUI
# Switch to create_image branch
RUN git checkout create_image
# Install ComfyUI requirements
RUN pip install --no-cache-dir -r requirements.txt
# Install additional dependencies for new features
RUN pip install --no-cache-dir \
    safetensors \
    transformers[torch] \
    accelerate \
    bitsandbytes \
    optimum
# Copy serverless handler
COPY runpod_handler.py /workspace/runpod_handler.py
COPY .env /workspace/.env
# Create necessary directories
RUN mkdir -p /workspace/ComfyUI/models/checkpoints \
    /workspace/ComfyUI/models/vae \
    /workspace/ComfyUI/models/loras \
    /workspace/ComfyUI/models/controlnet \
    /workspace/ComfyUI/models/clip_vision \
    /workspace/ComfyUI/models/upscale_models \
    /workspace/ComfyUI/input \
    /workspace/ComfyUI/output \
    /tmp/inputs \
    /tmp/outputs \
    /tmp/comfyui
# Set environment variables
ENV PYTHONPATH="/workspace/ComfyUI:${PYTHONPATH}"
ENV COMFYUI_SERVERLESS=true
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
# Create startup script
RUN echo '#!/bin/bash\n\
cd /workspace/ComfyUI\n\
python main.py --listen 0.0.0.0 --port 8000 --dont-print-server --disable-auto-launch &\n\
sleep 10\n\
cd /workspace\n\
python runpod_handler.py' > /workspace/start.sh && chmod +x /workspace/start.sh
# Expose port
EXPOSE 8000
# Set the command
CMD ["/workspace/start.sh"]

0 note.md Normal file

5 binary image files added (sizes: 624 KiB, 481 KiB, 463 KiB, 447 KiB, 215 KiB)

245 runpod_handler.py Normal file

@@ -0,0 +1,245 @@
#!/usr/bin/env python3
"""
RunPod Serverless Worker Handler for ComfyUI
Optimized for the new ComfyUI features and performance improvements
"""
import os
import json
import time
import logging
import tempfile
import requests
from typing import Dict, Any, Optional
import runpod

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class ComfyUIServerlessHandler:
    def __init__(self):
        self.comfyui_url = "http://127.0.0.1:8000"
        self.client_id = "runpod_serverless_worker"
        self.setup_paths()

    def setup_paths(self):
        """Setup required paths for serverless operation"""
        os.makedirs("/tmp/inputs", exist_ok=True)
        os.makedirs("/tmp/outputs", exist_ok=True)
        os.makedirs("/tmp/comfyui", exist_ok=True)

    def wait_for_comfyui(self, timeout: int = 120) -> bool:
        """Wait for ComfyUI to be ready"""
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                response = requests.get(f"{self.comfyui_url}/system_stats")
                if response.status_code == 200:
                    logger.info("ComfyUI is ready")
                    return True
            except requests.exceptions.RequestException:
                pass
            time.sleep(2)
        logger.error(f"ComfyUI not ready after {timeout} seconds")
        return False

    def download_input_files(self, input_data: Dict[str, Any]) -> Dict[str, str]:
        """Download input files and return local paths"""
        local_files = {}
        if "input_files" in input_data:
            for file_key, file_url in input_data["input_files"].items():
                try:
                    response = requests.get(file_url, timeout=60)
                    response.raise_for_status()
                    # Create temporary file
                    with tempfile.NamedTemporaryFile(
                        delete=False,
                        dir="/tmp/inputs",
                        suffix=os.path.splitext(file_url)[1]
                    ) as tmp_file:
                        tmp_file.write(response.content)
                        local_files[file_key] = tmp_file.name
                    logger.info(f"Downloaded {file_key} to {local_files[file_key]}")
                except Exception as e:
                    logger.error(f"Failed to download {file_key}: {str(e)}")
                    raise
        return local_files

    def execute_workflow(self, workflow: Dict[str, Any]) -> Dict[str, Any]:
        """Execute ComfyUI workflow"""
        try:
            # Queue the workflow
            queue_response = requests.post(
                f"{self.comfyui_url}/prompt",
                json={
                    "prompt": workflow,
                    "client_id": self.client_id
                },
                timeout=30
            )
            queue_response.raise_for_status()
            prompt_id = queue_response.json()["prompt_id"]
            logger.info(f"Queued workflow with prompt_id: {prompt_id}")
            # Wait for completion
            return self.wait_for_completion(prompt_id)
        except Exception as e:
            logger.error(f"Failed to execute workflow: {str(e)}")
            raise

    def wait_for_completion(self, prompt_id: str, timeout: int = 300) -> Dict[str, Any]:
        """Wait for workflow completion and return results"""
        start_time = time.time()
        while time.time() - start_time < timeout:
            try:
                # Check queue status
                queue_response = requests.get(f"{self.comfyui_url}/queue")
                queue_data = queue_response.json()
                # Check if our job is still in queue
                running = any(item[1]["prompt_id"] == prompt_id for item in queue_data.get("queue_running", []))
                pending = any(item[1]["prompt_id"] == prompt_id for item in queue_data.get("queue_pending", []))
                if not running and not pending:
                    # Job completed, get results
                    history_response = requests.get(f"{self.comfyui_url}/history/{prompt_id}")
                    if history_response.status_code == 200:
                        history_data = history_response.json()
                        if prompt_id in history_data:
                            return self.process_results(history_data[prompt_id])
                time.sleep(2)
            except Exception as e:
                logger.error(f"Error checking completion: {str(e)}")
                time.sleep(5)
        raise TimeoutError(f"Workflow {prompt_id} timed out after {timeout} seconds")

    def process_results(self, history_data: Dict[str, Any]) -> Dict[str, Any]:
        """Process and upload results"""
        results = {
            "status": "completed",
            "outputs": [],
            "metadata": {}
        }
        if "outputs" in history_data:
            for node_id, node_output in history_data["outputs"].items():
                if "images" in node_output:
                    for image_info in node_output["images"]:
                        # Download image from ComfyUI
                        image_url = f"{self.comfyui_url}/view"
                        params = {
                            "filename": image_info["filename"],
                            "subfolder": image_info.get("subfolder", ""),
                            "type": image_info.get("type", "output")
                        }
                        try:
                            image_response = requests.get(image_url, params=params)
                            image_response.raise_for_status()
                            # Save to temp file for upload
                            output_path = f"/tmp/outputs/{image_info['filename']}"
                            with open(output_path, "wb") as f:
                                f.write(image_response.content)
                            results["outputs"].append({
                                "type": "image",
                                "filename": image_info["filename"],
                                "path": output_path,
                                "node_id": node_id
                            })
                        except Exception as e:
                            logger.error(f"Failed to process image {image_info['filename']}: {str(e)}")
        return results

    def cleanup(self):
        """Clean up temporary files"""
        try:
            import shutil
            shutil.rmtree("/tmp/inputs", ignore_errors=True)
            shutil.rmtree("/tmp/outputs", ignore_errors=True)
            os.makedirs("/tmp/inputs", exist_ok=True)
            os.makedirs("/tmp/outputs", exist_ok=True)
            logger.info("Cleaned up temporary files")
        except Exception as e:
            logger.error(f"Cleanup failed: {str(e)}")

def handler(job: Dict[str, Any]) -> Dict[str, Any]:
    """Main serverless handler function"""
    handler_instance = ComfyUIServerlessHandler()
    try:
        # Wait for ComfyUI to be ready
        if not handler_instance.wait_for_comfyui():
            return {"error": "ComfyUI failed to start"}
        # Get job input
        job_input = job.get("input", {})
        # Download input files if any
        local_files = handler_instance.download_input_files(job_input)
        # Update workflow with local file paths
        workflow = job_input.get("workflow", {})
        if local_files and "file_mappings" in job_input:
            for node_id, mappings in job_input["file_mappings"].items():
                if node_id in workflow:
                    for input_key, file_key in mappings.items():
                        if file_key in local_files:
                            workflow[node_id]["inputs"][input_key] = local_files[file_key]
        # Execute workflow
        results = handler_instance.execute_workflow(workflow)
        # Upload output files to RunPod storage or return base64
        output_urls = []
        for output in results.get("outputs", []):
            if output["type"] == "image":
                # For serverless, we typically return base64 or upload to storage
                with open(output["path"], "rb") as f:
                    import base64
                    image_data = base64.b64encode(f.read()).decode()
                    output_urls.append({
                        "filename": output["filename"],
                        "data": image_data,
                        "node_id": output["node_id"]
                    })
        return {
            "status": "success",
            "outputs": output_urls,
            "execution_time": time.time() - job.get("start_time", time.time())
        }
    except Exception as e:
        logger.error(f"Handler error: {str(e)}")
        return {
            "error": str(e),
            "status": "failed"
        }
    finally:
        # Always cleanup
        handler_instance.cleanup()

if __name__ == "__main__":
    # Start the serverless worker
    runpod.serverless.start({"handler": handler})

150 runpod_setup.md Normal file

@@ -0,0 +1,150 @@
# RunPod Network Storage Setup Guide
## 1. Preparing Network Storage
### In the RunPod Dashboard:
1. Create a **Network Storage** volume (e.g. `comfyui-models`)
2. Choose a storage size (at least 50GB recommended)
3. Note the storage ID
### Uploading Models to Network Storage:
```bash
# Open a terminal in a RunPod pod
cd /runpod-volume
# Create the model folders
mkdir -p models/{checkpoints,loras,vae,controlnet,upscale_models,text_encoders,clip,diffusion_models,unet,embeddings,clip_vision}
# Example model download (SDXL Base)
cd models/checkpoints
wget https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors
# VAE model
cd ../vae
wget https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors
# ControlNet model
cd ../controlnet
wget https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/diffusers_xl_canny_mid.safetensors
```
## 2. RunPod Template Settings
### Container Settings:
- **Docker Image**: `your-registry/comfyui-runpod:latest`
- **Container Disk**: 20GB (minimum)
- **Network Storage**: Select the storage volume you created
### Environment Variables:
```bash
# Required
RUNPOD_NETWORK_STORAGE_PATH=/runpod-volume
PORT=8188
# Optional
LISTEN=0.0.0.0
COMFYUI_ARGS=--preview-method auto
DOWNLOAD_MODELS=sdxl-base,sdxl-vae  # for automatic downloads
```
### Ports:
- **Container Port**: 8188
- **Expose HTTP Ports**: 8188
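For reference, a minimal sketch of how `start_runpod.py` reads these variables; the defaults below match the values in the table above:

```python
import os

# Required: where RunPod mounts the network volume, and the server port
network_storage = os.environ.get("RUNPOD_NETWORK_STORAGE_PATH", "/runpod-volume")
port = os.environ.get("PORT", "8188")

# Optional: listen address, extra ComfyUI flags, comma-separated model list
listen = os.environ.get("LISTEN", "0.0.0.0")
extra_args = os.environ.get("COMFYUI_ARGS", "").split()
models = [m.strip() for m in os.environ.get("DOWNLOAD_MODELS", "").split(",") if m.strip()]

print(network_storage, port, listen, extra_args, models)
```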
## 3. Model Folder Layout
The network storage should be laid out as follows:
```
/runpod-volume/
└── models/
    ├── checkpoints/       # Base models (SDXL, SD 1.5, etc.)
    │   ├── sd_xl_base_1.0.safetensors
    │   └── sd_xl_refiner_1.0.safetensors
    ├── loras/             # LoRA models
    ├── vae/               # VAE models
    │   └── sdxl_vae.safetensors
    ├── controlnet/        # ControlNet models
    ├── upscale_models/    # Upscaler models
    ├── text_encoders/     # CLIP models
    ├── clip/              # Legacy CLIP folder
    ├── diffusion_models/  # UNet models
    ├── unet/              # Legacy UNet folder
    ├── embeddings/        # Textual Inversion
    └── clip_vision/       # CLIP Vision models
```
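To create and verify this layout from a pod terminal, a short Python sketch along the lines of `check_model_folders()` in `start_runpod.py` can be used; folder names match the tree above:

```python
import os

# Folder names as required by start_runpod.py
FOLDERS = [
    "checkpoints", "loras", "vae", "controlnet", "upscale_models",
    "text_encoders", "clip", "diffusion_models", "unet",
    "embeddings", "clip_vision",
]

base = "/runpod-volume/models"
for name in FOLDERS:
    path = os.path.join(base, name)
    os.makedirs(path, exist_ok=True)  # idempotent, safe to re-run
    files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
    print(f"{name}: {len(files)} file(s)")
```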
## 4. Popular Models
### Checkpoints:
```bash
# SDXL Base
wget https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors
# SDXL Refiner
wget https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors
# SD 1.5
wget https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors
```
### VAE:
```bash
# SDXL VAE
wget https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors
# SD 1.5 VAE
wget https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors
```
### ControlNet:
```bash
# SDXL Canny
wget https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/diffusers_xl_canny_mid.safetensors
# SDXL Depth
wget https://huggingface.co/lllyasviel/sd_control_collection/resolve/main/diffusers_xl_depth_mid.safetensors
```
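As an alternative to wget, the same files can be fetched from Python with `huggingface_hub` (a sketch; assumes `pip install huggingface_hub`, with repo and file names taken from the URLs above):

```python
from huggingface_hub import hf_hub_download

# (repo_id, filename, target folder) triples mirroring the wget commands above
MODELS = [
    ("stabilityai/stable-diffusion-xl-base-1.0", "sd_xl_base_1.0.safetensors", "checkpoints"),
    ("stabilityai/sdxl-vae", "sdxl_vae.safetensors", "vae"),
    ("lllyasviel/sd_control_collection", "diffusers_xl_canny_mid.safetensors", "controlnet"),
]

for repo_id, filename, folder in MODELS:
    path = hf_hub_download(
        repo_id=repo_id,
        filename=filename,
        local_dir=f"/runpod-volume/models/{folder}",  # layout from section 3
    )
    print(f"Downloaded {filename} -> {path}")
```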
## 5. Deployment
### Build and Push:
```bash
# Build the Docker image
docker build -t your-registry/comfyui-runpod:latest .
# Push it to the registry
docker push your-registry/comfyui-runpod:latest
```
### Deploying on RunPod:
1. Create a template
2. Mount the network storage
3. Set the environment variables
4. Deploy
## 6. Monitoring
The container logs should show lines like these:
```
2024-11-01 13:15:00 - INFO - Starting RunPod ComfyUI...
2024-11-01 13:15:01 - INFO - Network storage ready
2024-11-01 13:15:02 - INFO - Models folder linked to network storage
2024-11-01 13:15:03 - INFO - Model folder: checkpoints (2 files)
2024-11-01 13:15:04 - INFO - Starting ComfyUI...
```
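To confirm the server is actually responding, you can poll the `/system_stats` endpoint, which is the same readiness probe `runpod_handler.py` uses. A minimal sketch, assuming the pod port 8188 from section 2 (the serverless image listens on 8000 instead):

```python
import requests

def comfyui_ready(base_url="http://127.0.0.1:8188", timeout=5):
    """Return True if ComfyUI answers on /system_stats (same check as the handler)."""
    try:
        return requests.get(f"{base_url}/system_stats", timeout=timeout).status_code == 200
    except requests.exceptions.RequestException:
        return False

print("ready" if comfyui_ready() else "not ready")
```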
## 7. Troubleshooting
### If the Network Storage Does Not Mount:
- The local models folder is used as a fallback
- Check the logs: is `RUNPOD_NETWORK_STORAGE_PATH` set correctly?
### If Models Are Not Found:
- Check that the model files actually exist on the network storage
- Check the file permissions
- Check that the symlink resolves correctly (see the sketch below)
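A minimal symlink check, using the paths from `start_runpod.py`:

```python
import os

local_models = "/app/models"              # where ComfyUI looks for models
storage_models = "/runpod-volume/models"  # network storage target

print("is symlink:   ", os.path.islink(local_models))
print("resolves to:  ", os.path.realpath(local_models))
print("target exists:", os.path.isdir(storage_models))
# Healthy if the local path resolves to the network storage path:
print("healthy:", os.path.realpath(local_models) == os.path.realpath(storage_models))
```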
### Performance Tuning:
- Use a GPU instance (instead of CPU)
- Remove the `--cpu` flag from the Dockerfile
- Add CUDA support

206 start_runpod.py Normal file

@@ -0,0 +1,206 @@
#!/usr/bin/env python3
"""
RunPod startup script - network storage mounting and model management
"""
import os
import sys
import logging
import subprocess
import time
from pathlib import Path

# Logging setup
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

def mount_runpod_storage():
    """Mount the RunPod network storage"""
    try:
        # RunPod network storage path (taken from an environment variable)
        network_storage_path = os.environ.get('RUNPOD_NETWORK_STORAGE_PATH', '/runpod-volume')
        models_storage_path = os.path.join(network_storage_path, 'models')
        # Local models folder
        local_models_path = '/app/models'
        logger.info(f"Network storage path: {network_storage_path}")
        logger.info(f"Models storage path: {models_storage_path}")
        # Check whether a models folder exists on the network storage
        if os.path.exists(models_storage_path):
            logger.info("Found models folder on network storage")
            # Remove the local models folder and create a symlink
            if os.path.exists(local_models_path):
                if os.path.islink(local_models_path):
                    os.unlink(local_models_path)
                else:
                    import shutil
                    shutil.rmtree(local_models_path)
            # Create the symlink
            os.symlink(models_storage_path, local_models_path)
            logger.info(f"Models folder linked to network storage: {models_storage_path} -> {local_models_path}")
            # Check the model folders
            check_model_folders(local_models_path)
        else:
            logger.warning(f"No models folder found on network storage: {models_storage_path}")
            logger.info("The local models folder will be used")
            # Create a models folder on the network storage
            os.makedirs(models_storage_path, exist_ok=True)
            logger.info(f"Created models folder on network storage: {models_storage_path}")
            # Move any existing local models to the network storage
            if os.path.exists(local_models_path) and not os.path.islink(local_models_path):
                import shutil
                shutil.copytree(local_models_path, models_storage_path, dirs_exist_ok=True)
                shutil.rmtree(local_models_path)
                logger.info("Local models moved to network storage")
            # Create the symlink
            os.symlink(models_storage_path, local_models_path)
            logger.info("Models folder linked to network storage")
    except Exception as e:
        logger.error(f"Network storage mount error: {e}")
        logger.info("The local models folder will be used")
        ensure_local_model_folders()

def check_model_folders(models_path):
    """Check that the model folders exist"""
    required_folders = [
        'checkpoints',
        'loras',
        'vae',
        'controlnet',
        'upscale_models',
        'text_encoders',
        'clip',
        'diffusion_models',
        'unet',
        'embeddings',
        'clip_vision'
    ]
    for folder in required_folders:
        folder_path = os.path.join(models_path, folder)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path, exist_ok=True)
            logger.info(f"Created model folder: {folder}")
        else:
            # Count the files in the folder
            file_count = len([f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))])
            logger.info(f"Model folder: {folder} ({file_count} files)")

def ensure_local_model_folders():
    """Create the local model folders"""
    models_path = '/app/models'
    check_model_folders(models_path)

def download_essential_models():
    """Download essential models (optional)"""
    try:
        models_to_download = os.environ.get('DOWNLOAD_MODELS', '').split(',')
        models_to_download = [m.strip() for m in models_to_download if m.strip()]
        if not models_to_download:
            logger.info("No models specified for download")
            return
        logger.info(f"Models to download: {models_to_download}")
        # Model download logic can be added here
        # e.g., using huggingface-hub
    except Exception as e:
        logger.error(f"Model download error: {e}")

def setup_environment():
    """Set the environment variables"""
    # Environment variables required by ComfyUI
    os.environ['HF_HUB_DISABLE_TELEMETRY'] = '1'
    os.environ['DO_NOT_TRACK'] = '1'
    # RunPod specific
    if 'RUNPOD_POD_ID' in os.environ:
        logger.info(f"RunPod Pod ID: {os.environ['RUNPOD_POD_ID']}")
    # Port setting
    port = os.environ.get('PORT', '8188')
    os.environ['PORT'] = port
    logger.info(f"Server port: {port}")

def wait_for_storage():
    """Wait for the network storage to become ready"""
    max_wait = 30  # 30 seconds
    wait_interval = 2
    network_storage_path = os.environ.get('RUNPOD_NETWORK_STORAGE_PATH', '/runpod-volume')
    for i in range(0, max_wait, wait_interval):
        if os.path.exists(network_storage_path):
            logger.info("Network storage ready")
            return True
        logger.info(f"Waiting for network storage... ({i}/{max_wait}s)")
        time.sleep(wait_interval)
    logger.warning("Network storage timeout - falling back to local storage")
    return False

def main():
    """Main startup function"""
    logger.info("Starting RunPod ComfyUI...")
    # Environment setup
    setup_environment()
    # Wait for the network storage
    wait_for_storage()
    # Mount the network storage
    mount_runpod_storage()
    # Download essential models (optional)
    download_essential_models()
    # Start ComfyUI
    logger.info("Starting ComfyUI...")
    # Port and listen address
    port = os.environ.get('PORT', '8188')
    listen = os.environ.get('LISTEN', '0.0.0.0')
    # ComfyUI command
    cmd = [
        sys.executable, 'main.py',
        '--listen', listen,
        '--port', port,
        '--cpu'  # CPU mode for RunPod
    ]
    # Extra args
    if os.environ.get('COMFYUI_ARGS'):
        extra_args = os.environ['COMFYUI_ARGS'].split()
        cmd.extend(extra_args)
    logger.info(f"ComfyUI command: {' '.join(cmd)}")
    # Start ComfyUI
    try:
        subprocess.run(cmd, check=True)
    except KeyboardInterrupt:
        logger.info("ComfyUI stopped")
    except Exception as e:
        logger.error(f"ComfyUI startup error: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()

63 test_workflow.json Normal file

@@ -0,0 +1,63 @@
{
  "input": {
    "workflow": {
      "3": {
        "inputs": {
          "seed": 42,
          "steps": 20,
          "cfg": 7.0,
          "sampler_name": "euler",
          "scheduler": "normal",
          "denoise": 1.0,
          "model": ["4", 0],
          "positive": ["6", 0],
          "negative": ["7", 0],
          "latent_image": ["5", 0]
        },
        "class_type": "KSampler"
      },
      "4": {
        "inputs": {
          "ckpt_name": "sd_xl_base_1.0.safetensors"
        },
        "class_type": "CheckpointLoaderSimple"
      },
      "5": {
        "inputs": {
          "width": 1024,
          "height": 1024,
          "batch_size": 1
        },
        "class_type": "EmptyLatentImage"
      },
      "6": {
        "inputs": {
          "text": "a beautiful landscape with mountains and a lake, highly detailed, 8k",
          "clip": ["4", 1]
        },
        "class_type": "CLIPTextEncode"
      },
      "7": {
        "inputs": {
          "text": "blurry, low quality, distorted",
          "clip": ["4", 1]
        },
        "class_type": "CLIPTextEncode"
      },
      "8": {
        "inputs": {
          "samples": ["3", 0],
          "vae": ["4", 2]
        },
        "class_type": "VAEDecode"
      },
      "9": {
        "inputs": {
          "filename_prefix": "ComfyUI",
          "images": ["8", 0]
        },
        "class_type": "SaveImage"
      }
    }
  }
}
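A sketch of how this payload could be sent to a deployed serverless endpoint. The endpoint ID and API key are placeholders; `/runsync` is RunPod's synchronous run route, and its response wraps the handler's return value in an `output` field, so the base64 images produced by `runpod_handler.py` land under `output["outputs"]`:

```python
import base64
import json
import requests

ENDPOINT_ID = "your-endpoint-id"  # placeholder: your RunPod serverless endpoint ID
API_KEY = "your-runpod-api-key"   # placeholder: your RunPod API key

# test_workflow.json is already shaped as {"input": {"workflow": {...}}}
with open("test_workflow.json") as f:
    payload = json.load(f)

resp = requests.post(
    f"https://api.runpod.ai/v2/{ENDPOINT_ID}/runsync",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json=payload,
    timeout=600,
)
resp.raise_for_status()
result = resp.json()

# Decode and save each base64 image returned by the handler
for image in result.get("output", {}).get("outputs", []):
    with open(image["filename"], "wb") as out:
        out.write(base64.b64decode(image["data"]))
    print("saved", image["filename"])
```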