mirror of https://github.com/comfyanonymous/ComfyUI.git
Move log clearing to after all callbacks succeed, not before. This ensures that if any callback raises an exception, the logs remain available for retry on the next flush call instead of being lost. The previous approach cleared logs before iterating callbacks, which meant logs were permanently lost if any callback failed.
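To make the ordering concrete, here is a minimal standalone sketch of the pattern (the `FlushDemo` and `flaky_callback` names are hypothetical illustrations, not the actual ComfyUI class): because the buffer is cleared only after the callback loop completes, a raising callback leaves the entries in place and the next flush retries them.

class FlushDemo:
    def __init__(self):
        self._flush_callbacks = []
        self._logs_since_flush = []

    def flush(self):
        if not self._logs_since_flush:
            return
        logs_to_send = self._logs_since_flush
        for cb in self._flush_callbacks:
            cb(logs_to_send)  # may raise; the buffer is untouched if it does
        # Reached only when every callback succeeded
        self._logs_since_flush = []


demo = FlushDemo()
attempts = []

def flaky_callback(entries):
    attempts.append(len(entries))
    if len(attempts) == 1:
        raise RuntimeError("transient failure")

demo._flush_callbacks.append(flaky_callback)
demo._logs_since_flush.append({"m": "hello\n"})

try:
    demo.flush()
except RuntimeError:
    pass  # first delivery fails; the entry stays buffered
demo.flush()  # retry delivers the same entry and clears the buffer
assert attempts == [1, 1] and demo._logs_since_flush == []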
Python · 109 lines · 3.2 KiB
from collections import deque
from datetime import datetime
import io
import logging
import sys
import threading

logs = None
stdout_interceptor = None
stderr_interceptor = None


class LogInterceptor(io.TextIOWrapper):
    def __init__(self, stream, *args, **kwargs):
        buffer = stream.buffer
        encoding = stream.encoding
        super().__init__(buffer, *args, **kwargs, encoding=encoding, line_buffering=stream.line_buffering)
        self._lock = threading.Lock()
        self._flush_callbacks = []
        self._logs_since_flush = []

    def write(self, data):
        entry = {"t": datetime.now().isoformat(), "m": data}
        with self._lock:
            self._logs_since_flush.append(entry)

            # Simple handling for a carriage return: overwrite the last entry
            # if it isn't a full line, otherwise the logs fill up with
            # progress messages. The `logs` truthiness check guards the very
            # first write, when the deque is still empty.
            if isinstance(data, str) and data.startswith("\r") and logs and not logs[-1]["m"].endswith("\n"):
                logs.pop()
            logs.append(entry)
        super().write(data)

    def flush(self):
        try:
            super().flush()
        except OSError as e:
            # errno 22 (EINVAL) can occur on Windows with piped/redirected streams.
            # This is safe to ignore as write() already succeeded.
            if e.errno != 22:
                raise
        if not self._logs_since_flush:
            return
        logs_to_send = self._logs_since_flush
        for cb in self._flush_callbacks:
            cb(logs_to_send)
        # Only clear after all callbacks succeed - if any raises, logs remain for retry
        self._logs_since_flush = []

    def on_flush(self, callback):
        self._flush_callbacks.append(callback)


def get_logs():
    return logs


def on_flush(callback):
    if stdout_interceptor is not None:
        stdout_interceptor.on_flush(callback)
    if stderr_interceptor is not None:
        stderr_interceptor.on_flush(callback)


def setup_logger(log_level: str = 'INFO', capacity: int = 300, use_stdout: bool = False):
    global logs
    if logs:
        return

    # Override output streams and log to buffer
    logs = deque(maxlen=capacity)

    global stdout_interceptor
    global stderr_interceptor
    stdout_interceptor = sys.stdout = LogInterceptor(sys.stdout)
    stderr_interceptor = sys.stderr = LogInterceptor(sys.stderr)

    # Setup default global logger
    logger = logging.getLogger()
    logger.setLevel(log_level)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(message)s"))

    if use_stdout:
        # Only errors and critical go to stderr
        stream_handler.addFilter(lambda record: record.levelno >= logging.ERROR)

        # Lesser levels go to stdout
        stdout_handler = logging.StreamHandler(sys.stdout)
        stdout_handler.setFormatter(logging.Formatter("%(message)s"))
        stdout_handler.addFilter(lambda record: record.levelno < logging.ERROR)
        logger.addHandler(stdout_handler)

    logger.addHandler(stream_handler)


STARTUP_WARNINGS = []


def log_startup_warning(msg):
    logging.warning(msg)
    STARTUP_WARNINGS.append(msg)


def print_startup_warnings():
    for s in STARTUP_WARNINGS:
        logging.warning(s)
    STARTUP_WARNINGS.clear()
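
For context, a minimal usage sketch. The `from app import logger` import path matches the ComfyUI repo layout but is an assumption here, and `collect` is a hypothetical callback:

import logging
import sys

from app import logger  # assumption: this file lives at app/logger.py

logger.setup_logger(log_level="INFO", capacity=300)

received = []

def collect(entries):
    # Called on every flush of either intercepted stream. Avoid writing to
    # stdout/stderr in here: those writes are intercepted too and can
    # trigger recursive flushes.
    received.extend(entries)

logger.on_flush(collect)

logging.info("hello from the intercepted logger")
print("stdout is captured too")
sys.stdout.flush()  # deliver anything still buffered on stdout

# get_logs() exposes the shared ring buffer of {"t": ..., "m": ...} entries;
# write via the original stream (sys.__stdout__) to avoid re-capturing them
for entry in list(logger.get_logs()):
    sys.__stdout__.write(entry["t"] + " " + entry["m"])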