Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-17 08:52:34 +08:00)
Set Flux models to eval mode to restore FP8 performance
Fixes performance regression introduced in #9854 where the fp8_linear optimization path was being skipped during inference because Flux models were not explicitly set to eval mode.
parent d68ece7301
commit f87480188a
@@ -834,6 +834,7 @@ class PixArt(BaseModel):
 class Flux(BaseModel):
     def __init__(self, model_config, model_type=ModelType.FLUX, device=None, unet_model=comfy.ldm.flux.model.Flux):
         super().__init__(model_config, model_type, device=device, unet_model=unet_model)
+        self.diffusion_model.eval().requires_grad_(False)
         self.memory_usage_factor_conds = ("ref_latents",)

     def concat_cond(self, **kwargs):
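For context: PyTorch nn.Module instances start with training=True, so any optimization that is gated on the module's eval state stays disabled until .eval() is called on the model. The sketch below illustrates that gating pattern with hypothetical names; GatedLinear and its fast branch are illustrative only, not ComfyUI's actual fp8_linear code.

import torch
import torch.nn as nn

class GatedLinear(nn.Linear):
    # Illustrative sketch of an eval-only fast path; not ComfyUI's fp8_linear implementation.
    def forward(self, x):
        if not self.training:
            # Stand-in for the optimized matmul: ordinary linear under no_grad,
            # taken only once the module has been put in eval mode.
            with torch.no_grad():
                return nn.functional.linear(x, self.weight, self.bias)
        # Default path, taken while the module is still in training mode.
        return super().forward(x)

layer = GatedLinear(8, 8)
print(layer.training)                 # True by default, so the fast branch is skipped
layer.eval().requires_grad_(False)    # the same pattern the commit applies to the Flux model
x = torch.randn(2, 8)
print(layer.training, layer(x).shape) # False, torch.Size([2, 8]): fast branch now taken

Because a freshly constructed model is in training mode, forgetting the explicit .eval() call is enough to keep such a branch permanently skipped during inference, which is the regression this commit addresses for Flux.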