Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-10 14:20:49 +08:00)

Compare commits: 630a1dce01 ... 09f19eb7ae (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 09f19eb7ae |  |
|  | 69603be5d2 |  |
|  | ec0a832acb |  |
|  | 04c49a29b4 |  |
|  | 04c0dd0737 |  |
@@ -81,7 +81,8 @@ def get_comfy_models_folders() -> list[tuple[str, list[str]]]:
    """
    targets: list[tuple[str, list[str]]] = []
    models_root = os.path.abspath(folder_paths.models_dir)
-    for name, (paths, _exts) in folder_paths.folder_names_and_paths.items():
+    for name, values in folder_paths.folder_names_and_paths.items():
+        paths, _exts = values[0], values[1]  # NOTE: this prevents nodepacks that hackily edit folder_... from breaking ComfyUI
        if any(os.path.abspath(p).startswith(models_root + os.sep) for p in paths):
            targets.append((name, paths))
    return targets
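A minimal sketch (not part of the diff) of why the new loop indexes into `values` instead of tuple-unpacking it: a node pack that appends extra elements to an entry of `folder_names_and_paths` would make `for name, (paths, _exts) in ...` raise, while `values[0]` / `values[1]` keeps working. The dictionary contents below are invented purely for illustration.

# Hypothetical entries; real ComfyUI values differ.
folder_names_and_paths = {
    "checkpoints": (["/models/checkpoints"], {".safetensors"}),          # normal 2-tuple
    "loras": (["/models/loras"], {".safetensors"}, {"patched": True}),   # node pack appended a 3rd slot
}

for name, values in folder_names_and_paths.items():
    # `for name, (paths, _exts) in ...` would raise ValueError on the 3-tuple above;
    # indexing reads only the first two slots and ignores anything extra.
    paths, _exts = values[0], values[1]
    print(name, paths)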
@@ -409,8 +409,137 @@ class LTXV(LatentFormat):


+class LTXAV(LTXV):
+    def __init__(self):
+        self.latent_rgb_factors = None
+        self.latent_rgb_factors_bias = None
+        self.latent_rgb_factors = [
+            [ 0.0350, 0.0159, 0.0132],
+            [ 0.0025, -0.0021, -0.0003],
+            [ 0.0286, 0.0028, 0.0020],
+            [ 0.0280, -0.0114, -0.0202],
+            [-0.0186, 0.0073, 0.0092],
+            [ 0.0027, 0.0097, -0.0113],
+            [-0.0069, -0.0032, -0.0024],
+            [-0.0323, -0.0370, -0.0457],
+            [ 0.0174, 0.0164, 0.0106],
+            [-0.0097, 0.0061, 0.0035],
+            [-0.0130, -0.0042, -0.0012],
+            [-0.0102, -0.0002, -0.0091],
+            [-0.0025, 0.0063, 0.0161],
+            [ 0.0003, 0.0037, 0.0108],
+            [ 0.0152, 0.0082, 0.0143],
+            [ 0.0317, 0.0203, 0.0312],
+            [-0.0092, -0.0233, -0.0119],
+            [-0.0405, -0.0226, -0.0023],
+            [ 0.0376, 0.0397, 0.0352],
+            [ 0.0171, -0.0043, -0.0095],
+            [ 0.0482, 0.0341, 0.0213],
+            [ 0.0031, -0.0046, -0.0018],
+            [-0.0486, -0.0383, -0.0294],
+            [-0.0071, -0.0272, -0.0123],
+            [ 0.0320, 0.0218, 0.0289],
+            [ 0.0327, 0.0088, -0.0116],
+            [-0.0098, -0.0240, -0.0111],
+            [ 0.0094, -0.0116, 0.0021],
+            [ 0.0309, 0.0092, 0.0165],
+            [-0.0065, -0.0077, -0.0107],
+            [ 0.0179, 0.0114, 0.0038],
+            [-0.0018, -0.0030, -0.0026],
+            [-0.0002, 0.0076, -0.0029],
+            [-0.0131, -0.0059, -0.0170],
+            [ 0.0055, 0.0066, -0.0038],
+            [ 0.0154, 0.0063, 0.0090],
+            [ 0.0186, 0.0175, 0.0188],
+            [-0.0166, -0.0381, -0.0428],
+            [ 0.0121, 0.0015, -0.0153],
+            [ 0.0118, 0.0050, 0.0019],
+            [ 0.0125, 0.0259, 0.0231],
+            [ 0.0046, 0.0130, 0.0081],
+            [ 0.0271, 0.0250, 0.0250],
+            [-0.0054, -0.0347, -0.0326],
+            [-0.0438, -0.0262, -0.0228],
+            [-0.0191, -0.0256, -0.0173],
+            [-0.0205, -0.0058, 0.0042],
+            [ 0.0404, 0.0434, 0.0346],
+            [-0.0242, -0.0177, -0.0146],
+            [ 0.0161, 0.0223, 0.0168],
+            [-0.0240, -0.0320, -0.0299],
+            [-0.0019, 0.0043, 0.0008],
+            [-0.0060, -0.0133, -0.0244],
+            [-0.0048, -0.0225, -0.0167],
+            [ 0.0267, 0.0133, 0.0152],
+            [ 0.0222, 0.0167, 0.0028],
+            [ 0.0015, -0.0062, 0.0013],
+            [-0.0241, -0.0178, -0.0079],
+            [ 0.0040, -0.0081, -0.0097],
+            [-0.0064, 0.0133, -0.0011],
+            [-0.0204, -0.0231, -0.0304],
+            [ 0.0011, -0.0011, 0.0145],
+            [-0.0283, -0.0259, -0.0260],
+            [ 0.0038, 0.0171, -0.0029],
+            [ 0.0637, 0.0424, 0.0409],
+            [ 0.0092, 0.0163, 0.0188],
+            [ 0.0082, 0.0055, -0.0179],
+            [-0.0177, -0.0286, -0.0147],
+            [ 0.0171, 0.0242, 0.0398],
+            [-0.0129, 0.0095, -0.0071],
+            [-0.0154, 0.0036, 0.0128],
+            [-0.0081, -0.0009, 0.0118],
+            [-0.0067, -0.0178, -0.0230],
+            [-0.0022, -0.0125, -0.0003],
+            [-0.0032, -0.0039, -0.0022],
+            [-0.0005, -0.0127, -0.0131],
+            [-0.0143, -0.0157, -0.0165],
+            [-0.0262, -0.0263, -0.0270],
+            [ 0.0063, 0.0127, 0.0178],
+            [ 0.0092, 0.0133, 0.0150],
+            [-0.0106, -0.0068, 0.0032],
+            [-0.0214, -0.0022, 0.0171],
+            [-0.0104, -0.0266, -0.0362],
+            [ 0.0021, 0.0048, -0.0005],
+            [ 0.0345, 0.0431, 0.0402],
+            [-0.0275, -0.0110, -0.0195],
+            [ 0.0203, 0.0251, 0.0224],
+            [ 0.0016, -0.0037, -0.0094],
+            [ 0.0241, 0.0198, 0.0114],
+            [-0.0003, 0.0027, 0.0141],
+            [ 0.0012, -0.0052, -0.0084],
+            [ 0.0057, -0.0028, -0.0163],
+            [-0.0488, -0.0545, -0.0509],
+            [-0.0076, -0.0025, -0.0014],
+            [-0.0249, -0.0142, -0.0367],
+            [ 0.0136, 0.0041, 0.0135],
+            [ 0.0007, 0.0034, -0.0053],
+            [-0.0068, -0.0109, 0.0029],
+            [ 0.0006, -0.0237, -0.0094],
+            [-0.0149, -0.0177, -0.0131],
+            [-0.0105, 0.0039, 0.0216],
+            [ 0.0242, 0.0200, 0.0180],
+            [-0.0339, -0.0153, -0.0195],
+            [ 0.0104, 0.0151, 0.0120],
+            [-0.0043, 0.0089, 0.0047],
+            [ 0.0157, -0.0030, 0.0008],
+            [ 0.0126, 0.0102, -0.0040],
+            [ 0.0040, 0.0114, 0.0137],
+            [ 0.0423, 0.0473, 0.0436],
+            [-0.0128, -0.0066, -0.0152],
+            [-0.0337, -0.0087, -0.0026],
+            [-0.0052, 0.0235, 0.0291],
+            [ 0.0079, 0.0154, 0.0260],
+            [-0.0539, -0.0377, -0.0358],
+            [-0.0188, 0.0062, -0.0035],
+            [-0.0186, 0.0041, -0.0083],
+            [ 0.0045, -0.0049, 0.0053],
+            [ 0.0172, 0.0071, 0.0042],
+            [-0.0003, -0.0078, -0.0096],
+            [-0.0209, -0.0132, -0.0135],
+            [-0.0074, 0.0017, 0.0099],
+            [-0.0038, 0.0070, 0.0014],
+            [-0.0013, -0.0017, 0.0073],
+            [ 0.0030, 0.0105, 0.0105],
+            [ 0.0154, -0.0168, -0.0235],
+            [-0.0108, -0.0038, 0.0047],
+            [-0.0298, -0.0347, -0.0436],
+            [-0.0206, -0.0189, -0.0139]
+        ]
+        self.latent_rgb_factors_bias = [0.2796, 0.1101, -0.0047]
+

class HunyuanVideo(LatentFormat):
    latent_channels = 16
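For context (not part of the diff): `latent_rgb_factors` is a per-channel projection consumed by the latent previewer touched later in this comparison (Latent2RGBPreviewer). A rough sketch of the idea follows, with made-up tensor values and a simplified normalization; the real previewer's post-processing may differ.

import torch

latent = torch.randn(1, 128, 8, 8)                  # pretend LTXAV latent: [batch, 128 channels, h, w]
factors = torch.randn(128, 3)                       # stand-in for the 128x3 latent_rgb_factors above
bias = torch.tensor([0.2796, 0.1101, -0.0047])      # the latent_rgb_factors_bias from the diff

# Each latent channel contributes a weighted amount of R, G and B, plus a per-channel bias.
rgb = torch.einsum("bchw,cr->bhwr", latent, factors) + bias
rgb = ((rgb + 1.0) / 2.0).clamp(0.0, 1.0)           # simplified mapping into [0, 1] for display
print(rgb.shape)                                     # torch.Size([1, 8, 8, 3])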
@@ -14,8 +14,9 @@ class JobStatus:
    IN_PROGRESS = 'in_progress'
    COMPLETED = 'completed'
    FAILED = 'failed'
+    CANCELLED = 'cancelled'

-    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED]
+    ALL = [PENDING, IN_PROGRESS, COMPLETED, FAILED, CANCELLED]


# Media types that can be previewed in the frontend
@@ -94,12 +95,6 @@ def normalize_history_item(prompt_id: str, history_item: dict, include_outputs:

    status_info = history_item.get('status', {})
    status_str = status_info.get('status_str') if status_info else None
-    if status_str == 'success':
-        status = JobStatus.COMPLETED
-    elif status_str == 'error':
-        status = JobStatus.FAILED
-    else:
-        status = JobStatus.COMPLETED

    outputs = history_item.get('outputs', {})
    outputs_count, preview_output = get_outputs_summary(outputs)
@@ -107,6 +102,7 @@ def normalize_history_item(prompt_id: str, history_item: dict, include_outputs:
    execution_error = None
    execution_start_time = None
    execution_end_time = None
+    was_interrupted = False
    if status_info:
        messages = status_info.get('messages', [])
        for entry in messages:
@@ -119,6 +115,15 @@ def normalize_history_item(prompt_id: str, history_item: dict, include_outputs:
                execution_end_time = event_data.get('timestamp')
            if event_name == 'execution_error':
                execution_error = event_data
+            elif event_name == 'execution_interrupted':
+                was_interrupted = True
+
+    if status_str == 'success':
+        status = JobStatus.COMPLETED
+    elif status_str == 'error':
+        status = JobStatus.CANCELLED if was_interrupted else JobStatus.FAILED
+    else:
+        status = JobStatus.COMPLETED

    job = prune_dict({
        'id': prompt_id,
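A condensed restatement of the decision the relocated block now makes (illustrative only; `resolve_status` is not a function in the diff): the status is resolved only after the messages have been scanned, so an 'error' history entry that contains an execution_interrupted message surfaces as cancelled rather than failed.

def resolve_status(status_str, was_interrupted):
    # Mirrors the if/elif/else added above, with plain strings instead of JobStatus constants.
    if status_str == 'success':
        return 'completed'
    if status_str == 'error':
        return 'cancelled' if was_interrupted else 'failed'
    return 'completed'

assert resolve_status('success', False) == 'completed'
assert resolve_status('error', False) == 'failed'
assert resolve_status('error', True) == 'cancelled'   # interrupted prompts are no longer reported as failed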
@@ -268,13 +273,13 @@ def get_all_jobs(
    for item in queued:
        jobs.append(normalize_queue_item(item, JobStatus.PENDING))

-    include_completed = JobStatus.COMPLETED in status_filter
-    include_failed = JobStatus.FAILED in status_filter
-    if include_completed or include_failed:
+    history_statuses = {JobStatus.COMPLETED, JobStatus.FAILED, JobStatus.CANCELLED}
+    requested_history_statuses = history_statuses & set(status_filter)
+    if requested_history_statuses:
        for prompt_id, history_item in history.items():
-            is_failed = history_item.get('status', {}).get('status_str') == 'error'
-            if (is_failed and include_failed) or (not is_failed and include_completed):
-                jobs.append(normalize_history_item(prompt_id, history_item))
+            job = normalize_history_item(prompt_id, history_item)
+            if job.get('status') in requested_history_statuses:
+                jobs.append(job)

    if workflow_id:
        jobs = [j for j in jobs if j.get('workflow_id') == workflow_id]

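Illustrative only: the set intersection above means history is scanned whenever any history-backed status (completed, failed, or cancelled) is requested, and each normalized job is then matched against exactly the statuses asked for. The helper name and sample data below are invented.

HISTORY_STATUSES = {'completed', 'failed', 'cancelled'}

def pick_history_jobs(normalized_jobs, status_filter):
    requested = HISTORY_STATUSES & set(status_filter)
    if not requested:
        return []                                   # nothing history-backed was requested; skip the scan
    return [job for job in normalized_jobs if job.get('status') in requested]

jobs = [{'id': 'a', 'status': 'completed'},
        {'id': 'b', 'status': 'failed'},
        {'id': 'c', 'status': 'cancelled'}]
print(pick_history_jobs(jobs, ['cancelled']))       # [{'id': 'c', 'status': 'cancelled'}]
print(pick_history_jobs(jobs, ['pending']))         # []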
@@ -753,7 +753,7 @@ class SamplerCustom(io.ComfyNode):
            noise_mask = latent["noise_mask"]

        x0_output = {}
-        callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output)
+        callback = latent_preview.prepare_callback(model, sigmas.shape[-1] - 1, x0_output, shape=latent_image.shape if latent_image.is_nested else None)

        disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
        samples = comfy.sample.sample_custom(model, noise, cfg, sampler, sigmas, positive, negative, latent_image, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise_seed)
@@ -944,7 +944,7 @@ class SamplerCustomAdvanced(io.ComfyNode):
            noise_mask = latent["noise_mask"]

        x0_output = {}
-        callback = latent_preview.prepare_callback(guider.model_patcher, sigmas.shape[-1] - 1, x0_output)
+        callback = latent_preview.prepare_callback(guider.model_patcher, sigmas.shape[-1] - 1, x0_output, shape=latent_image.shape if latent_image.is_nested else None)

        disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
        samples = guider.sample(noise.generate_noise(latent), latent_image, sampler, sigmas, denoise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=noise.seed)

@@ -7,6 +7,7 @@ import comfy.model_management
import folder_paths
import comfy.utils
import logging
+import math

default_preview_method = args.preview_method

@@ -109,7 +110,7 @@ def get_previewer(device, latent_format):
            previewer = Latent2RGBPreviewer(latent_format.latent_rgb_factors, latent_format.latent_rgb_factors_bias, latent_format.latent_rgb_factors_reshape)
    return previewer

-def prepare_callback(model, steps, x0_output_dict=None):
+def prepare_callback(model, steps, x0_output_dict=None, shape=None):
    preview_format = "JPEG"
    if preview_format not in ["JPEG", "PNG"]:
        preview_format = "JPEG"
@@ -121,6 +122,10 @@ def prepare_callback(model, steps, x0_output_dict=None):
        if x0_output_dict is not None:
            x0_output_dict["x0"] = x0

+        if shape is not None:
+            cut = math.prod(shape[1:])
+            x0 = x0[:, :, :cut].reshape([x0.shape[0]] + list(shape)[1:])
+
        preview_bytes = None
        if previewer:
            preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)

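A rough sketch of the arithmetic the new `shape` branch performs. The packed layout of the preview latent here (a trailing dimension holding the real data followed by padding) is an assumption made purely for illustration; only the `math.prod` slice-and-reshape step mirrors the diff.

import math
import torch

shape = (1, 16, 32, 32)                  # hypothetical target layout: [batch, c, h, w]
cut = math.prod(shape[1:])               # 16 * 32 * 32 = 16384 elements actually belong to the latent

x0 = torch.randn(1, 1, cut + 512)        # made-up packed preview tensor with 512 padded elements
x0 = x0[:, :, :cut].reshape([x0.shape[0]] + list(shape)[1:])
print(x0.shape)                          # torch.Size([1, 16, 32, 32])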
nodes.py
@@ -1505,7 +1505,7 @@ def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive,
    if "noise_mask" in latent:
        noise_mask = latent["noise_mask"]

-    callback = latent_preview.prepare_callback(model, steps)
+    callback = latent_preview.prepare_callback(model, steps, shape=latent_image.shape if latent_image.is_nested else None)
    disable_pbar = not comfy.utils.PROGRESS_BAR_ENABLED
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
                                  denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,

@@ -19,6 +19,7 @@ class TestJobStatus:
        assert JobStatus.IN_PROGRESS == 'in_progress'
        assert JobStatus.COMPLETED == 'completed'
        assert JobStatus.FAILED == 'failed'
+        assert JobStatus.CANCELLED == 'cancelled'

    def test_all_contains_all_statuses(self):
        """ALL should contain all status values."""
@@ -26,7 +27,8 @@ class TestJobStatus:
        assert JobStatus.IN_PROGRESS in JobStatus.ALL
        assert JobStatus.COMPLETED in JobStatus.ALL
        assert JobStatus.FAILED in JobStatus.ALL
-        assert len(JobStatus.ALL) == 4
+        assert JobStatus.CANCELLED in JobStatus.ALL
+        assert len(JobStatus.ALL) == 5


class TestIsPreviewable:
@@ -336,6 +338,40 @@ class TestNormalizeHistoryItem:
        assert job['execution_error']['node_type'] == 'KSampler'
        assert job['execution_error']['exception_message'] == 'CUDA out of memory'

+    def test_cancelled_job(self):
+        """Cancelled/interrupted history item should have cancelled status."""
+        history_item = {
+            'prompt': (
+                5,
+                'prompt-cancelled',
+                {'nodes': {}},
+                {'create_time': 1234567890000},
+                ['node1'],
+            ),
+            'status': {
+                'status_str': 'error',
+                'completed': False,
+                'messages': [
+                    ('execution_start', {'prompt_id': 'prompt-cancelled', 'timestamp': 1234567890500}),
+                    ('execution_interrupted', {
+                        'prompt_id': 'prompt-cancelled',
+                        'node_id': '5',
+                        'node_type': 'KSampler',
+                        'executed': ['1', '2', '3'],
+                        'timestamp': 1234567891000,
+                    })
+                ]
+            },
+            'outputs': {},
+        }
+
+        job = normalize_history_item('prompt-cancelled', history_item)
+        assert job['status'] == 'cancelled'
+        assert job['execution_start_time'] == 1234567890500
+        assert job['execution_end_time'] == 1234567891000
+        # Cancelled jobs should not have execution_error set
+        assert 'execution_error' not in job
+
    def test_include_outputs(self):
        """When include_outputs=True, should include full output data."""
        history_item = {