Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-31 00:30:21 +08:00)

Compare commits: c7375db42b...2d5d95bb98 (10 commits)
Commits in this range:

- 2d5d95bb98
- 5ac1372533
- 1dcbd9efaf
- db9e6edfa1
- 8af13b439b
- acd0e53653
- 117e7a5853
- b3c0e4de57
- ecaeeb990d
- cc5d177b54
```diff
@@ -92,14 +92,23 @@ def seed_from_paths_batch(
         session.execute(ins_asset, chunk)
 
     # try to claim AssetCacheState (file_path)
-    winners_by_path: set[str] = set()
+    # Insert with ON CONFLICT DO NOTHING, then query to find which paths were actually inserted
     ins_state = (
         sqlite.insert(AssetCacheState)
         .on_conflict_do_nothing(index_elements=[AssetCacheState.file_path])
-        .returning(AssetCacheState.file_path)
     )
     for chunk in _iter_chunks(state_rows, _rows_per_stmt(3)):
-        winners_by_path.update((session.execute(ins_state, chunk)).scalars().all())
+        session.execute(ins_state, chunk)
+
+    # Query to find which of our paths won (were actually inserted)
+    winners_by_path: set[str] = set()
+    for chunk in _iter_chunks(path_list, MAX_BIND_PARAMS):
+        result = session.execute(
+            sqlalchemy.select(AssetCacheState.file_path)
+            .where(AssetCacheState.file_path.in_(chunk))
+            .where(AssetCacheState.asset_id.in_([path_to_asset[p] for p in chunk]))
+        )
+        winners_by_path.update(result.scalars().all())
 
     all_paths_set = set(path_list)
     losers_by_path = all_paths_set - winners_by_path
@@ -112,16 +121,23 @@ def seed_from_paths_batch(
         return {"inserted_infos": 0, "won_states": 0, "lost_states": len(losers_by_path)}
 
     # insert AssetInfo only for winners
+    # Insert with ON CONFLICT DO NOTHING, then query to find which were actually inserted
    winner_info_rows = [asset_to_info[path_to_asset[p]] for p in winners_by_path]
     ins_info = (
         sqlite.insert(AssetInfo)
         .on_conflict_do_nothing(index_elements=[AssetInfo.asset_id, AssetInfo.owner_id, AssetInfo.name])
-        .returning(AssetInfo.id)
     )
 
-    inserted_info_ids: set[str] = set()
     for chunk in _iter_chunks(winner_info_rows, _rows_per_stmt(9)):
-        inserted_info_ids.update((session.execute(ins_info, chunk)).scalars().all())
+        session.execute(ins_info, chunk)
+
+    # Query to find which info rows were actually inserted (by matching our generated IDs)
+    all_info_ids = [row["id"] for row in winner_info_rows]
+    inserted_info_ids: set[str] = set()
+    for chunk in _iter_chunks(all_info_ids, MAX_BIND_PARAMS):
+        result = session.execute(
+            sqlalchemy.select(AssetInfo.id).where(AssetInfo.id.in_(chunk))
+        )
+        inserted_info_ids.update(result.scalars().all())
 
     # build and insert tag + meta rows for the AssetInfo
     tag_rows: list[dict] = []
```
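The change drops the RETURNING clause from both ON CONFLICT DO NOTHING inserts: the code now inserts first and then selects back the rows that actually landed. Below is a minimal, self-contained sketch of that insert-then-select pattern; the table, columns, and sample values are illustrative stand-ins, not the real ComfyUI asset schema.

```python
import sqlalchemy
from sqlalchemy.dialects.sqlite import insert as sqlite_insert
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column

class Base(DeclarativeBase):
    pass

class CacheState(Base):  # illustrative stand-in for AssetCacheState
    __tablename__ = "cache_state"
    file_path: Mapped[str] = mapped_column(primary_key=True)
    asset_id: Mapped[str] = mapped_column()

engine = sqlalchemy.create_engine("sqlite://")
Base.metadata.create_all(engine)

rows = [{"file_path": "a.bin", "asset_id": "1"}, {"file_path": "b.bin", "asset_id": "2"}]

with Session(engine) as session:
    # Simulate another writer that already claimed "a.bin".
    session.execute(sqlite_insert(CacheState), [{"file_path": "a.bin", "asset_id": "other"}])

    # Step 1: insert everything, silently skipping rows that lose the race (no RETURNING).
    ins = sqlite_insert(CacheState).on_conflict_do_nothing(index_elements=[CacheState.file_path])
    session.execute(ins, rows)

    # Step 2: select back the paths that carry *our* asset_id, i.e. the rows we actually won.
    winners = set(
        session.execute(
            sqlalchemy.select(CacheState.file_path)
            .where(CacheState.file_path.in_([r["file_path"] for r in rows]))
            .where(CacheState.asset_id.in_([r["asset_id"] for r in rows]))
        ).scalars().all()
    )
    print(winners)  # {'b.bin'} -- "a.bin" lost to the pre-existing row
    session.commit()
```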
comfy/float.py  (+118 lines)

```diff
@@ -65,3 +65,121 @@ def stochastic_rounding(value, dtype, seed=0):
         return output
 
     return value.to(dtype=dtype)
+
+
+# TODO: improve this?
+def stochastic_float_to_fp4_e2m1(x, generator):
+    orig_shape = x.shape
+    sign = torch.signbit(x).to(torch.uint8)
+
+    exp = torch.floor(torch.log2(x.abs()) + 1.0).clamp(0, 3)
+    x += (torch.rand(x.size(), dtype=x.dtype, layout=x.layout, device=x.device, generator=generator) - 0.5) * (2 ** (exp - 2.0)) * 1.25
+
+    x = x.abs()
+    exp = torch.floor(torch.log2(x) + 1.1925).clamp(0, 3)
+
+    mantissa = torch.where(
+        exp > 0,
+        (x / (2.0 ** (exp - 1)) - 1.0) * 2.0,
+        (x * 2.0),
+        out=x
+    ).round().to(torch.uint8)
+    del x
+
+    exp = exp.to(torch.uint8)
+
+    fp4 = (sign << 3) | (exp << 1) | mantissa
+    del sign, exp, mantissa
+
+    fp4_flat = fp4.view(-1)
+    packed = (fp4_flat[0::2] << 4) | fp4_flat[1::2]
+    return packed.reshape(list(orig_shape)[:-1] + [-1])
```
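The function above packs two 4-bit E2M1 codes (1 sign bit, 2 exponent bits, 1 mantissa bit) into each byte, high nibble first. The decoder below is a hedged illustration, not part of the patch, to make the bit layout concrete:

```python
import torch

def fp4_e2m1_decode(packed: torch.Tensor) -> torch.Tensor:
    # Unpack two 4-bit codes per byte; the high nibble holds the even-indexed
    # element, matching the packing order used above.
    hi = (packed >> 4) & 0xF
    lo = packed & 0xF
    codes = torch.stack([hi, lo], dim=-1).flatten(-2)

    sign = ((codes >> 3) & 0x1).to(torch.float32)   # 1 sign bit
    exp = ((codes >> 1) & 0x3).to(torch.float32)    # 2 exponent bits
    mant = (codes & 0x1).to(torch.float32)          # 1 mantissa bit

    # exp == 0 is the subnormal range (0 or 0.5); otherwise 2^(exp-1) * (1 + 0.5*mant).
    mag = torch.where(exp > 0, (2.0 ** (exp - 1.0)) * (1.0 + 0.5 * mant), 0.5 * mant)
    return torch.where(sign > 0, -mag, mag)

# All byte values decode to magnitudes in {0, 0.5, 1, 1.5, 2, 3, 4, 6}, hence F4_E2M1_MAX = 6.0.
print(fp4_e2m1_decode(torch.arange(256).to(torch.uint8)).unique())
```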
The same hunk continues with the block-scale rearrangement helper:

```diff
+def to_blocked(input_matrix, flatten: bool = True) -> torch.Tensor:
+    """
+    Rearrange a large matrix by breaking it into blocks and applying the rearrangement pattern.
+    See:
+        https://docs.nvidia.com/cuda/cublas/index.html#d-block-scaling-factors-layout
+
+    Args:
+        input_matrix: Input tensor of shape (H, W)
+    Returns:
+        Rearranged tensor of shape (32*ceil_div(H,128), 16*ceil_div(W,4))
+    """
+
+    def ceil_div(a, b):
+        return (a + b - 1) // b
+
+    rows, cols = input_matrix.shape
+    n_row_blocks = ceil_div(rows, 128)
+    n_col_blocks = ceil_div(cols, 4)
+
+    # Calculate the padded shape
+    padded_rows = n_row_blocks * 128
+    padded_cols = n_col_blocks * 4
+
+    padded = input_matrix
+    if (rows, cols) != (padded_rows, padded_cols):
+        padded = torch.zeros(
+            (padded_rows, padded_cols),
+            device=input_matrix.device,
+            dtype=input_matrix.dtype,
+        )
+        padded[:rows, :cols] = input_matrix
+
+    # Rearrange the blocks
+    blocks = padded.view(n_row_blocks, 128, n_col_blocks, 4).permute(0, 2, 1, 3)
+    rearranged = blocks.reshape(-1, 4, 32, 4).transpose(1, 2).reshape(-1, 32, 16)
+    if flatten:
+        return rearranged.flatten()
+
+    return rearranged.reshape(padded_rows, padded_cols)
```
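`to_blocked` zero-pads the (H, W) block-scale matrix up to multiples of (128, 4) and regroups it into 32x16 tiles, following the cuBLAS block-scaling-factor layout linked in the docstring. A small, hedged shape check with arbitrary sizes, assuming ComfyUI's `comfy` package is importable:

```python
import torch
from comfy.float import to_blocked  # assumes the ComfyUI comfy package is on the path

scales = torch.randn(100, 6)                    # H=100, W=6 -> zero-padded to 128 x 8
print(to_blocked(scales, flatten=True).shape)   # torch.Size([1024]): the flattened 32x16 tiles
print(to_blocked(scales, flatten=False).shape)  # torch.Size([128, 8]): reshaped to (padded_rows, padded_cols)
```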
The hunk ends with the stochastic NVFP4 quantizer that ties the two helpers together:

```diff
+def stochastic_round_quantize_nvfp4(x, per_tensor_scale, pad_16x, seed=0):
+    F4_E2M1_MAX = 6.0
+    F8_E4M3_MAX = 448.0
+
+    def roundup(x: int, multiple: int) -> int:
+        """Round up x to the nearest multiple."""
+        return ((x + multiple - 1) // multiple) * multiple
+
+    orig_shape = x.shape
+
+    # Handle padding
+    if pad_16x:
+        rows, cols = x.shape
+        padded_rows = roundup(rows, 16)
+        padded_cols = roundup(cols, 16)
+        if padded_rows != rows or padded_cols != cols:
+            x = torch.nn.functional.pad(x, (0, padded_cols - cols, 0, padded_rows - rows))
+            # Note: We update orig_shape because the output tensor logic below assumes x.shape matches
+            # what we want to produce. If we pad here, we want the padded output.
+            orig_shape = x.shape
+
+    block_size = 16
+
+    x = x.reshape(orig_shape[0], -1, block_size)
+    max_abs = torch.amax(torch.abs(x), dim=-1)
+    block_scale = max_abs / F4_E2M1_MAX
+    scaled_block_scales = block_scale / per_tensor_scale.to(block_scale.dtype)
+    scaled_block_scales_fp8 = torch.clamp(scaled_block_scales, max=F8_E4M3_MAX).to(torch.float8_e4m3fn)
+    total_scale = per_tensor_scale.to(x.dtype) * scaled_block_scales_fp8.to(x.dtype)
+
+    # Handle zero blocks (from padding): avoid 0/0 NaN
+    zero_scale_mask = (total_scale == 0)
+    total_scale_safe = torch.where(zero_scale_mask, torch.ones_like(total_scale), total_scale)
+
+    x = x / total_scale_safe.unsqueeze(-1)
+
+    generator = torch.Generator(device=x.device)
+    generator.manual_seed(seed)
+
+    x = torch.where(zero_scale_mask.unsqueeze(-1), torch.zeros_like(x), x)
+
+    x = x.view(orig_shape)
+    data_lp = stochastic_float_to_fp4_e2m1(x, generator=generator)
+
+    blocked_scales = to_blocked(scaled_block_scales_fp8, flatten=False)
+    return data_lp, blocked_scales
```
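A hedged usage sketch, again assuming `comfy.float` is importable. The input shape is arbitrary but already 16-aligned, so `pad_16x=False`; the per-tensor scale uses the same 448 * 6 formula as `TensorCoreNVFP4Layout.quantize` further down, and the expected shapes in the comments follow from the code above.

```python
import torch
from comfy.float import stochastic_round_quantize_nvfp4  # assumes the ComfyUI comfy package is on the path

x = torch.randn(128, 64, dtype=torch.float32)             # already a multiple of 16 in both dims
per_tensor_scale = torch.amax(x.abs()) / (448.0 * 6.0)    # F8_E4M3_MAX * F4_E2M1_MAX

data_lp, blocked_scales = stochastic_round_quantize_nvfp4(x, per_tensor_scale, pad_16x=False, seed=1234)
print(data_lp.shape, data_lp.dtype)                # torch.Size([128, 32]) torch.uint8 -- two FP4 codes per byte
print(blocked_scales.shape, blocked_scales.dtype)  # torch.Size([128, 4]) torch.float8_e4m3fn -- one FP8 scale per 16-value block
```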
```diff
@@ -699,7 +699,7 @@ def mixed_precision_ops(quant_config={}, compute_dtype=torch.bfloat16, full_prec
     def set_weight(self, weight, inplace_update=False, seed=None, return_weight=False, **kwargs):
         if getattr(self, 'layout_type', None) is not None:
             # dtype is now implicit in the layout class
-            weight = QuantizedTensor.from_float(weight, self.layout_type, scale="recalculate", stochastic_rounding=seed, inplace_ops=True)
+            weight = QuantizedTensor.from_float(weight, self.layout_type, scale="recalculate", stochastic_rounding=seed, inplace_ops=True).to(self.weight.dtype)
         else:
             weight = weight.to(self.weight.dtype)
         if return_weight:
```
```diff
@@ -7,7 +7,7 @@ try:
     QuantizedTensor,
     QuantizedLayout,
     TensorCoreFP8Layout as _CKFp8Layout,
-    TensorCoreNVFP4Layout, # Direct import, no wrapper needed
+    TensorCoreNVFP4Layout as _CKNvfp4Layout,
     register_layout_op,
     register_layout_class,
     get_layout_class,
@@ -34,7 +34,7 @@ except ImportError as e:
     class _CKFp8Layout:
         pass
 
-    class TensorCoreNVFP4Layout:
+    class _CKNvfp4Layout:
         pass
 
     def register_layout_class(name, cls):
@@ -84,6 +84,39 @@ class _TensorCoreFP8LayoutBase(_CKFp8Layout):
         return qdata, params
 
 
+class TensorCoreNVFP4Layout(_CKNvfp4Layout):
+    @classmethod
+    def quantize(cls, tensor, scale=None, stochastic_rounding=0, inplace_ops=False):
+        if tensor.dim() != 2:
+            raise ValueError(f"NVFP4 requires 2D tensor, got {tensor.dim()}D")
+
+        orig_dtype = tensor.dtype
+        orig_shape = tuple(tensor.shape)
+
+        if scale is None or (isinstance(scale, str) and scale == "recalculate"):
+            scale = torch.amax(tensor.abs()) / (ck.float_utils.F8_E4M3_MAX * ck.float_utils.F4_E2M1_MAX)
+
+        if not isinstance(scale, torch.Tensor):
+            scale = torch.tensor(scale)
+        scale = scale.to(device=tensor.device, dtype=torch.float32)
+
+        padded_shape = cls.get_padded_shape(orig_shape)
+        needs_padding = padded_shape != orig_shape
+
+        if stochastic_rounding > 0:
+            qdata, block_scale = comfy.float.stochastic_round_quantize_nvfp4(tensor, scale, pad_16x=needs_padding, seed=stochastic_rounding)
+        else:
+            qdata, block_scale = ck.quantize_nvfp4(tensor, scale, pad_16x=needs_padding)
+
+        params = cls.Params(
+            scale=scale,
+            orig_dtype=orig_dtype,
+            orig_shape=orig_shape,
+            block_scale=block_scale,
+        )
+        return qdata, params
+
+
 class TensorCoreFP8E4M3Layout(_TensorCoreFP8LayoutBase):
     FP8_DTYPE = torch.float8_e4m3fn
```
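A hedged numeric aside on the per-tensor scale chosen when `scale == "recalculate"`: dividing by F8_E4M3_MAX * F4_E2M1_MAX = 448 * 6 means the block holding the global maximum gets an FP8 block scale of about 448, right at the top of the E4M3 range before clamping, and block values land within the FP4 E2M1 range after scaling. The tensor below is arbitrary.

```python
import torch

F8_E4M3_MAX, F4_E2M1_MAX = 448.0, 6.0
W = torch.randn(256, 256)

per_tensor_scale = torch.amax(W.abs()) / (F8_E4M3_MAX * F4_E2M1_MAX)  # same formula as quantize() above

# For the block containing the global max, the block scale relative to the
# per-tensor scale comes out at exactly F8_E4M3_MAX.
block_max = torch.amax(W.abs())
fp8_block_scale = (block_max / F4_E2M1_MAX) / per_tensor_scale
print(fp8_block_scale.item())  # ~448.0
```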
```diff
@@ -845,7 +845,7 @@ class LTXAV(LTXV):
 
     def __init__(self, unet_config):
         super().__init__(unet_config)
-        self.memory_usage_factor = 0.061 # TODO
+        self.memory_usage_factor = 0.077 # TODO
 
     def get_model(self, state_dict, prefix="", device=None):
         out = model_base.LTXAV(self, device=device)
```
```diff
@@ -49,6 +49,9 @@ class DifferentialDiffusion(io.ComfyNode):
 
         threshold = (current_ts - ts_to) / (ts_from - ts_to)
 
+        # Move the threshold tensor to the same device as denoise mask tensor
+        threshold = threshold.to(denoise_mask.device)
+
         # Generate the binary mask based on the threshold
         binary_mask = (denoise_mask >= threshold).to(denoise_mask.dtype)
 
```
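The added lines address a device mismatch: the threshold tensor may live on a different device than the denoise mask, and comparing non-0-dim tensors across devices raises a RuntimeError. A hedged, self-contained illustration with arbitrary shapes:

```python
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
denoise_mask = torch.rand(1, 4, 64, 64, device=device)
threshold = torch.rand(1)                        # computed on the CPU

threshold = threshold.to(denoise_mask.device)    # the added move; without it the comparison
binary_mask = (denoise_mask >= threshold).to(denoise_mask.dtype)  # would fail when mask is on CUDA
print(binary_mask.shape, binary_mask.device)
```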
```diff
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.8.2"
+__version__ = "0.9.1"
```
```diff
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.8.2"
+version = "0.9.1"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.10"
```
```diff
@@ -1,5 +1,5 @@
-comfyui-frontend-package==1.36.13
-comfyui-workflow-templates==0.8.0
+comfyui-frontend-package==1.36.14
+comfyui-workflow-templates==0.8.4
 comfyui-embedded-docs==0.4.0
 torch
 torchsde
@@ -21,7 +21,7 @@ psutil
 alembic
 SQLAlchemy
 av>=14.2.0
-comfy-kitchen>=0.2.5
+comfy-kitchen>=0.2.6
 
 #non essential dependencies:
 kornia>=0.7.1
```