Compare commits

...

4 Commits

Author      SHA1        Message                                              Date
vanDuven    16867004b3  Merge 5905513e32 into 5943fbf457                     2026-01-08 21:36:07 +02:00
Dr.Lt.Data  5943fbf457  bump comfyui_manager version to the 4.0.5 (#11732)   2026-01-08 08:15:42 -08:00
vanDuven    5905513e32  revert fp16 support for Lumina2                      2025-12-11 01:06:26 -05:00
vanDuven    a8ea6953ec  Fix Z Image FP16 overflow via downscaling            2025-12-08 05:43:02 -05:00
2 changed files with 11 additions and 3 deletions


@@ -119,6 +119,9 @@ class JointAttention(nn.Module):
         xv = xv.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
         output = optimized_attention_masked(xq.movedim(1, 2), xk.movedim(1, 2), xv.movedim(1, 2), self.n_local_heads, x_mask, skip_reshape=True, transformer_options=transformer_options)
+        if output.dtype == torch.float16:
+            output.div_(4)
         return self.out(output)
@@ -175,8 +178,12 @@ class FeedForward(nn.Module):
     def _forward_silu_gating(self, x1, x3):
         return clamp_fp16(F.silu(x1) * x3)

-    def forward(self, x):
-        return self.w2(self._forward_silu_gating(self.w1(x), self.w3(x)))
+    def forward(self, x, apply_fp16_downscale=False):
+        x3 = self.w3(x)
+        if x.dtype == torch.float16 and apply_fp16_downscale:
+            x3.div_(32)
+        return self.w2(self._forward_silu_gating(self.w1(x), x3))

 class JointTransformerBlock(nn.Module):
@@ -287,6 +294,7 @@ class JointTransformerBlock(nn.Module):
             x = x + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(
                 clamp_fp16(self.feed_forward(
                     modulate(self.ffn_norm1(x), scale_mlp),
+                    apply_fp16_downscale=True,
                 ))
             )
         else:
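
A minimal illustrative sketch (not part of this diff; the tensor and values are made up) of why the added div_ calls help: float16 saturates near 65504, so a product of large activations overflows to inf, and dividing one operand by a constant first keeps the intermediate representable.

    import torch

    # float16 overflows above ~65504, so a product of large activations becomes inf.
    x = torch.full((4,), 300.0, dtype=torch.float16)
    print(x * x)         # tensor([inf, inf, inf, inf], dtype=torch.float16)

    # Dividing one operand first (the diff does this in place with output.div_(4)
    # in JointAttention and x3.div_(32) in FeedForward) keeps the product finite.
    print((x / 32) * x)  # ~2812.5, finite in float16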


@@ -1 +1 @@
-comfyui_manager==4.0.4
+comfyui_manager==4.0.5