diff --git a/comfy_execution/jobs.py b/comfy_execution/jobs.py
index a8c0eec79..95bfb1469 100644
--- a/comfy_execution/jobs.py
+++ b/comfy_execution/jobs.py
@@ -9,9 +9,10 @@ class JobStatus:
     PENDING = 'pending'
     IN_PROGRESS = 'in_progress'
     COMPLETED = 'completed'
-    ERROR = 'error'
+    FAILED = 'failed'
+    CANCELLED = 'cancelled'
 
-    ALL = [PENDING, IN_PROGRESS, COMPLETED, ERROR]
+    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]
 
 
 # Media types that can be previewed in the frontend
@@ -49,17 +50,17 @@ def is_previewable(media_type, item):
 
 def normalize_queue_item(item, status):
     """Convert queue item tuple to unified job dict."""
-    priority, prompt_id, _, extra_data, _ = item[:5]
+    _, prompt_id, _, extra_data, _ = item[:5]
     create_time = extra_data.get('create_time')
 
     return {
         'id': prompt_id,
         'status': status,
-        'priority': priority,
         'create_time': create_time,
-        'execution_time': None,
         'error_message': None,
         'execution_error': None,
+        'execution_start_time': None,
+        'execution_end_time': None,
         'outputs_count': 0,
         'preview_output': None,
         'workflow_id': None,
@@ -69,7 +70,7 @@ def normalize_queue_item(item, status):
 def normalize_history_item(prompt_id, history_item, include_outputs=False):
     """Convert history item dict to unified job dict."""
     prompt_tuple = history_item['prompt']
-    priority, _, prompt, extra_data, outputs_to_execute = prompt_tuple[:5]
+    _, _, prompt, extra_data, _ = prompt_tuple[:5]
     create_time = extra_data.get('create_time')
 
     status_info = history_item.get('status', {})
@@ -77,7 +78,7 @@ def normalize_history_item(prompt_id, history_item, include_outputs=False):
     if status_str == 'success':
         status = JobStatus.COMPLETED
     elif status_str == 'error':
-        status = JobStatus.ERROR
+        status = JobStatus.FAILED
     else:
         status = JobStatus.COMPLETED
 
@@ -86,7 +87,7 @@ def normalize_history_item(prompt_id, history_item, include_outputs=False):
 
     error_message = None
     execution_error = None
-    if status == JobStatus.ERROR and status_info:
+    if status == JobStatus.FAILED and status_info:
         messages = status_info.get('messages', [])
         for entry in messages:
             if isinstance(entry, (list, tuple)) and len(entry) >= 2 and entry[0] == 'execution_error':
@@ -96,15 +97,21 @@ def normalize_history_item(prompt_id, history_item, include_outputs=False):
                 execution_error = detail
                 break
 
-    execution_time = history_item.get('execution_time')
+    execution_time_duration = history_item.get('execution_time')
+    execution_start_time = None
+    execution_end_time = None
+    if execution_time_duration is not None and create_time is not None:
+        execution_end_time = create_time + int(execution_time_duration * 1000)
+        execution_start_time = create_time
 
     job = {
         'id': prompt_id,
         'status': status,
-        'priority': priority,
         'create_time': create_time,
-        'execution_time': execution_time,
         'error_message': error_message,
+        'execution_error': execution_error,
+        'execution_start_time': execution_start_time,
+        'execution_end_time': execution_end_time,
         'outputs_count': outputs_count,
         'preview_output': preview_output,
         'workflow_id': None,
@@ -112,10 +119,11 @@ def normalize_history_item(prompt_id, history_item, include_outputs=False):
 
     if include_outputs:
         job['outputs'] = outputs
-        job['prompt'] = prompt
-        job['extra_data'] = extra_data
-        job['outputs_to_execute'] = outputs_to_execute
-        job['execution_error'] = execution_error
+        job['execution_status'] = status_info
+        job['workflow'] = {
+            'prompt': prompt,
+            'extra_data': extra_data,
+        }
 
     return job
 
@@ -161,7 +169,9 @@ def apply_sorting(jobs, sort_by, sort_order):
 
     if sort_by == 'execution_time':
         def get_sort_key(job):
-            return job.get('execution_time') or 0
+            start = job.get('execution_start_time') or 0
+            end = job.get('execution_end_time') or 0
+            return end - start if end and start else 0
     else:
         def get_sort_key(job):
             return job.get('create_time') or 0
diff --git a/execution.py b/execution.py
index 5fc616dfd..558fb8b5e 100644
--- a/execution.py
+++ b/execution.py
@@ -1276,11 +1276,11 @@ class PromptQueue:
             jobs.append(normalize_queue_item(item, JobStatus.PENDING))
 
         include_completed = JobStatus.COMPLETED in status_filter
-        include_error = JobStatus.ERROR in status_filter
-        if include_completed or include_error:
+        include_failed = JobStatus.FAILED in status_filter
+        if include_completed or include_failed:
             for prompt_id, history_item in self.history.items():
-                is_error = history_item.get('status', {}).get('status_str') == 'error'
-                if (is_error and include_error) or (not is_error and include_completed):
+                is_failed = history_item.get('status', {}).get('status_str') == 'error'
+                if (is_failed and include_failed) or (not is_failed and include_completed):
                     jobs.append(normalize_history_item(prompt_id, history_item))
 
         jobs = apply_sorting(jobs, sort_by, sort_order)
diff --git a/server.py b/server.py
index 0e0cc0daa..6e0b7f31a 100644
--- a/server.py
+++ b/server.py
@@ -762,8 +762,12 @@ class PromptServer():
 
             return web.json_response({
                 'jobs': jobs,
-                'total': total,
-                'has_more': has_more
+                'pagination': {
+                    'offset': offset,
+                    'limit': limit,
+                    'total': total,
+                    'has_more': has_more
+                }
             })
 
         @routes.get("/api/jobs/{job_id}")
diff --git a/tests/execution/test_jobs.py b/tests/execution/test_jobs.py
index a4a0dc510..479169643 100644
--- a/tests/execution/test_jobs.py
+++ b/tests/execution/test_jobs.py
@@ -18,15 +18,17 @@ class TestJobStatus:
         assert JobStatus.PENDING == 'pending'
         assert JobStatus.IN_PROGRESS == 'in_progress'
         assert JobStatus.COMPLETED == 'completed'
-        assert JobStatus.ERROR == 'error'
+        assert JobStatus.FAILED == 'failed'
+        assert JobStatus.CANCELLED == 'cancelled'
 
     def test_all_contains_all_statuses(self):
         """ALL should contain all status values."""
         assert JobStatus.PENDING in JobStatus.ALL
         assert JobStatus.IN_PROGRESS in JobStatus.ALL
         assert JobStatus.COMPLETED in JobStatus.ALL
-        assert JobStatus.ERROR in JobStatus.ALL
-        assert len(JobStatus.ALL) == 4
+        assert JobStatus.FAILED in JobStatus.ALL
+        assert JobStatus.CANCELLED in JobStatus.ALL
+        assert len(JobStatus.ALL) == 5
 
 
 class TestIsPreviewable:
@@ -217,9 +219,9 @@ class TestApplySorting:
     def test_sort_by_execution_time(self):
         """Sort by execution_time should order by duration."""
         jobs = [
-            {'id': 'a', 'create_time': 100, 'execution_time': 5.0},
-            {'id': 'b', 'create_time': 300, 'execution_time': 1.0},
-            {'id': 'c', 'create_time': 200, 'execution_time': 3.0},
+            {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},  # 5s
+            {'id': 'b', 'create_time': 300, 'execution_start_time': 300, 'execution_end_time': 1300},  # 1s
+            {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},  # 3s
         ]
         result = apply_sorting(jobs, 'execution_time', 'desc')
         assert [j['id'] for j in result] == ['a', 'c', 'b']
@@ -227,9 +229,9 @@ class TestApplySorting:
     def test_sort_with_none_values(self):
         """Jobs with None values should sort as 0."""
         jobs = [
-            {'id': 'a', 'create_time': 100, 'execution_time': 5.0},
-            {'id': 'b', 'create_time': 300, 'execution_time': None},
-            {'id': 'c', 'create_time': 200, 'execution_time': 3.0},
+            {'id': 'a', 'create_time': 100, 'execution_start_time': 100, 'execution_end_time': 5100},
+            {'id': 'b', 'create_time': 300, 'execution_start_time': None, 'execution_end_time': None},
+            {'id': 'c', 'create_time': 200, 'execution_start_time': 200, 'execution_end_time': 3200},
         ]
         result = apply_sorting(jobs, 'execution_time', 'asc')
         assert result[0]['id'] == 'b'  # None treated as 0, comes first
@@ -251,9 +253,9 @@ class TestNormalizeQueueItem:
 
         assert job['id'] == 'prompt-123'
         assert job['status'] == 'pending'
-        assert job['priority'] == 10
         assert job['create_time'] == 1234567890
-        assert job['execution_time'] is None
+        assert job['execution_start_time'] is None
+        assert job['execution_end_time'] is None
         assert job['error_message'] is None
         assert job['outputs_count'] == 0
 
@@ -268,7 +270,7 @@ class TestNormalizeHistoryItem:
                 5,  # priority
                 'prompt-456',
                 {'nodes': {}},
-                {'create_time': 1234567890},
+                {'create_time': 1234567890000},  # milliseconds
                 ['node1'],
             ),
             'status': {'status_str': 'success', 'completed': True, 'messages': []},
@@ -279,10 +281,11 @@ class TestNormalizeHistoryItem:
 
         assert job['id'] == 'prompt-456'
         assert job['status'] == 'completed'
-        assert job['execution_time'] == 2.5
+        assert job['execution_start_time'] == 1234567890000
+        assert job['execution_end_time'] == 1234567890000 + 2500  # +2.5 seconds in ms
 
-    def test_error_job(self):
-        """Error history item should have error status and message."""
+    def test_failed_job(self):
+        """Failed history item should have failed status and message."""
         error_detail = {
             'node_id': '5',
             'node_type': 'KSampler',
@@ -309,17 +312,13 @@ class TestNormalizeHistoryItem:
             'execution_time': 1.0,
         }
 
-        # List view - no execution_error
+        # List view - includes execution_error
         job = normalize_history_item('prompt-789', history_item)
-        assert job['status'] == 'error'
+        assert job['status'] == 'failed'
         assert job['error_message'] == 'CUDA out of memory'
-        assert 'execution_error' not in job
-
-        # Detail view - includes execution_error
-        job_detail = normalize_history_item('prompt-789', history_item, include_outputs=True)
-        assert job_detail['execution_error'] == error_detail
-        assert job_detail['execution_error']['node_id'] == '5'
-        assert job_detail['execution_error']['node_type'] == 'KSampler'
+        assert job['execution_error'] == error_detail
+        assert job['execution_error']['node_id'] == '5'
+        assert job['execution_error']['node_type'] == 'KSampler'
 
     def test_include_outputs(self):
         """When include_outputs=True, should include full output data."""
@@ -338,6 +337,10 @@ class TestNormalizeHistoryItem:
         job = normalize_history_item('prompt-123', history_item, include_outputs=True)
 
         assert 'outputs' in job
-        assert 'prompt' in job
-        assert 'extra_data' in job
+        assert 'workflow' in job
+        assert 'execution_status' in job
         assert job['outputs'] == {'node1': {'images': [{'filename': 'test.png'}]}}
+        assert job['workflow'] == {
+            'prompt': {'nodes': {'1': {}}},
+            'extra_data': {'create_time': 1234567890, 'client_id': 'abc'},
+        }