use ws messages to get start and end times

This commit is contained in:
Richard Yu 2025-12-04 12:51:24 -08:00
parent 1b673069fe
commit 5274b91f25
4 changed files with 45 additions and 43 deletions

View File

@@ -90,21 +90,20 @@ def normalize_history_item(prompt_id, history_item, include_outputs=False):
outputs_count, preview_output = get_outputs_summary(outputs)
execution_error = None
if status == JobStatus.FAILED and status_info:
messages = status_info.get('messages', [])
for entry in messages:
if isinstance(entry, (list, tuple)) and len(entry) >= 2 and entry[0] == 'execution_error':
detail = entry[1]
if isinstance(detail, dict):
execution_error = detail
break
execution_duration = history_item.get('execution_duration')
execution_start_time = None
execution_end_time = None
if execution_duration is not None and create_time is not None: if status_info:
execution_end_time = create_time + int(execution_duration * 1000) messages = status_info.get('messages', [])
execution_start_time = create_time for entry in messages:
if isinstance(entry, (list, tuple)) and len(entry) >= 2:
event_name, event_data = entry[0], entry[1]
if isinstance(event_data, dict):
if event_name == 'execution_start':
execution_start_time = event_data.get('timestamp')
elif event_name in ('execution_success', 'execution_error', 'execution_interrupted'):
execution_end_time = event_data.get('timestamp')
if event_name == 'execution_error':
execution_error = event_data
job = {
'id': prompt_id,

View File

@@ -1136,8 +1136,7 @@ class PromptQueue:
messages: List[str]
def task_done(self, item_id, history_result,
status: Optional['PromptQueue.ExecutionStatus'], process_item=None, status: Optional['PromptQueue.ExecutionStatus'], process_item=None):
execution_duration: Optional[float] = None):
with self.mutex:
prompt = self.currently_running.pop(item_id)
if len(self.history) > MAXIMUM_HISTORY_SIZE:
@@ -1154,7 +1153,6 @@ class PromptQueue:
"prompt": prompt,
"outputs": {},
'status': status_dict,
'execution_duration': execution_duration,
} }
self.history[prompt[1]].update(history_result)
self.server.queue_updated()

13
main.py
View File

@@ -230,7 +230,7 @@ def prompt_worker(q, server_instance):
need_gc = True
current_time = time.perf_counter()
execution_duration = current_time - execution_start_time execution_time = current_time - execution_start_time
remove_sensitive = lambda prompt: prompt[:5] + prompt[6:]
q.task_done(item_id,
@@ -239,17 +239,16 @@ def prompt_worker(q, server_instance):
status_str='success' if e.success else 'error',
completed=e.success,
messages=e.status_messages),
process_item=remove_sensitive, process_item=remove_sensitive)
execution_duration=execution_duration)
if server_instance.client_id is not None:
server_instance.send_sync("executing", {"node": None, "prompt_id": prompt_id}, server_instance.client_id)
# Log Time in a more readable way after 10 minutes
if execution_duration > 600: if execution_time > 600:
execution_duration_str = time.strftime("%H:%M:%S", time.gmtime(execution_duration)) execution_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
logging.info(f"Prompt executed in {execution_duration_str}") logging.info(f"Prompt executed in {execution_time}")
else:
logging.info("Prompt executed in {:.2f} seconds".format(execution_duration)) logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
flags = q.get_flags()
free_memory = flags.get("free_memory", False)

View File

@ -269,7 +269,7 @@ class TestNormalizeHistoryItem:
"""Unit tests for normalize_history_item()""" """Unit tests for normalize_history_item()"""
def test_completed_job(self): def test_completed_job(self):
"""Completed history item should have correct status.""" """Completed history item should have correct status and times from messages."""
history_item = {
'prompt': (
5, # priority
@@ -278,54 +278,61 @@ class TestNormalizeHistoryItem:
{
'create_time': 1234567890000,
'extra_pnginfo': {'workflow': {'id': 'workflow-xyz'}}
}, # milliseconds },
['node1'],
),
'status': {'status_str': 'success', 'completed': True, 'messages': []}, 'status': {
'status_str': 'success',
'completed': True,
'messages': [
('execution_start', {'prompt_id': 'prompt-456', 'timestamp': 1234567890500}),
('execution_success', {'prompt_id': 'prompt-456', 'timestamp': 1234567893000}),
]
},
'outputs': {},
'execution_duration': 2.5,
} }
job = normalize_history_item('prompt-456', history_item)
assert job['id'] == 'prompt-456'
assert job['status'] == 'completed'
assert job['priority'] == 5
assert job['execution_start_time'] == 1234567890000 assert job['execution_start_time'] == 1234567890500
assert job['execution_end_time'] == 1234567890000 + 2500 # +2.5 seconds in ms assert job['execution_end_time'] == 1234567893000
assert job['workflow_id'] == 'workflow-xyz'
def test_failed_job(self):
"""Failed history item should have failed status and message.""" """Failed history item should have failed status and error from messages."""
error_detail = {
'node_id': '5',
'node_type': 'KSampler',
'exception_message': 'CUDA out of memory',
'exception_type': 'RuntimeError',
'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],
}
history_item = {
'prompt': (
5,
'prompt-789',
{'nodes': {}},
{'create_time': 1234567890}, {'create_time': 1234567890000},
['node1'],
),
'status': {
'status_str': 'error',
'completed': False,
'messages': [
('execution_error', error_detail) ('execution_start', {'prompt_id': 'prompt-789', 'timestamp': 1234567890500}),
('execution_error', {
'prompt_id': 'prompt-789',
'node_id': '5',
'node_type': 'KSampler',
'exception_message': 'CUDA out of memory',
'exception_type': 'RuntimeError',
'traceback': ['Traceback...', 'RuntimeError: CUDA out of memory'],
'timestamp': 1234567891000,
})
]
},
'outputs': {},
'execution_duration': 1.0,
} }
# List view - includes execution_error
job = normalize_history_item('prompt-789', history_item)
assert job['status'] == 'failed'
assert job['execution_error'] == error_detail assert job['execution_start_time'] == 1234567890500
assert job['execution_end_time'] == 1234567891000
assert job['execution_error']['node_id'] == '5'
assert job['execution_error']['node_type'] == 'KSampler'
assert job['execution_error']['exception_message'] == 'CUDA out of memory'
@@ -342,7 +349,6 @@ class TestNormalizeHistoryItem:
),
'status': {'status_str': 'success', 'completed': True, 'messages': []},
'outputs': {'node1': {'images': [{'filename': 'test.png'}]}},
'execution_duration': 2.5,
} }
job = normalize_history_item('prompt-123', history_item, include_outputs=True)