Compare commits

No commits in common. "00daa775249d11e78e90ab44a4d502fb57c04279" and "20bd2c0236a9e88ce6de333d0527e0f7804987dd" have entirely different histories.

@@ -51,11 +51,10 @@ class TrainGuider(comfy_extras.nodes_custom_sampler.Guider_Basic):
noise.shape,
self.conds,
self.model_options,
force_full_load=not self.offloading,
force_full_load=False,
force_offload=self.offloading,
)
)
torch.cuda.empty_cache()
device = self.model_patcher.load_device
if denoise_mask is not None:
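For context, self.offloading is a flag the guider receives at construction time. A minimal sketch of how it could be wired up, assuming a constructor signature inferred from the TrainGuider(mp, offloading=offloading) call later in this diff (the real class carries much more training logic):

import comfy_extras.nodes_custom_sampler

class TrainGuider(comfy_extras.nodes_custom_sampler.Guider_Basic):
    # Sketch only: the signature is inferred from how the diff constructs the guider.
    def __init__(self, model_patcher, offloading=False):
        super().__init__(model_patcher)
        self.offloading = offloading  # gates force_full_load / force_offload above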
@@ -1032,7 +1031,7 @@ class TrainLoraNode(io.ComfyNode):
max=5,
tooltip="Depth level for gradient checkpointing.",
),
io.Boolean.Input(
io.Int.Input(
"offloading",
default=False,
tooltip="Depth level for gradient checkpointing.",
@@ -1103,7 +1102,6 @@ class TrainLoraNode(io.ComfyNode):
lora_dtype = lora_dtype[0]
algorithm = algorithm[0]
gradient_checkpointing = gradient_checkpointing[0]
offloading = offloading[0]
checkpoint_depth = checkpoint_depth[0]
existing_lora = existing_lora[0]
bucket_mode = bucket_mode[0]
@@ -1170,7 +1168,7 @@ class TrainLoraNode(io.ComfyNode):
# With force_full_load=False we should be able to have offloading
# But for offloading in training we need custom AutoGrad hooks for fwd/bwd
comfy.model_management.load_models_gpu(
[mp], memory_required=1e20, force_full_load=not offloading
[mp], memory_required=1e20, force_full_load=False
)
torch.cuda.empty_cache()
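The comment above notes that real weight offloading during training needs custom autograd hooks around the forward and backward passes. PyTorch's torch.autograd.graph.save_on_cpu gives a feel for that fwd/bwd hook pattern, though it offloads saved activations rather than model weights; a minimal sketch, not the code this repository uses:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), nn.Linear(4096, 1024)).cuda()
x = torch.randn(8, 1024, device="cuda", requires_grad=True)

with torch.autograd.graph.save_on_cpu(pin_memory=True):
    # Forward-side hook: tensors saved for backward are moved to pinned CPU memory.
    loss = model(x).pow(2).mean()

# Backward-side hook: the saved tensors are copied back to the GPU on demand.
loss.backward()

Weights are harder to page in and out this way because the autograd graph and the optimizer keep their own references to the parameters, which is presumably why the comment calls for custom hooks.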
@@ -1207,7 +1205,7 @@ class TrainLoraNode(io.ComfyNode):
)
# Setup guider
guider = TrainGuider(mp, offloading=offloading)
guider = TrainGuider(mp, offloading)
guider.set_conds(positive)
# Inject bypass hooks if bypass mode is enabled
@@ -1241,20 +1239,19 @@ class TrainLoraNode(io.ComfyNode):
unpatch(m)
del train_sampler, optimizer
for param in lora_sd:
lora_sd[param] = lora_sd[param].to(lora_dtype).detach()
# Finalize adapters
for adapter in all_weight_adapters:
adapter.requires_grad_(False)
del adapter
del all_weight_adapters
for param in lora_sd:
lora_sd[param] = lora_sd[param].to(lora_dtype)
# mp in train node is highly specialized for training
# using it for inference will result in bad behavior, so we don't return it
return io.NodeOutput(lora_sd, loss_map, steps + existing_steps)
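Once the loop above has detached the trained weights and cast them to lora_dtype, the dict is an ordinary LoRA state dict. A hypothetical follow-up step, not part of this node (which returns the dict instead of saving it), would be to write it out with safetensors:

import torch
from safetensors.torch import save_file

def save_lora(lora_sd, path, dtype=torch.bfloat16):
    # Detach, cast and move to CPU so the tensors are plain data, then save.
    out = {k: v.detach().to("cpu", dtype).contiguous() for k, v in lora_sd.items()}
    save_file(out, path)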
class LoraModelLoader(io.ComfyNode):
class LoraModelLoader(io.ComfyNode):#
@classmethod
def define_schema(cls):
return io.Schema(
@@ -1276,11 +1273,6 @@ class LoraModelLoader(io.ComfyNode):
max=100.0,
tooltip="How strongly to modify the diffusion model. This value can be negative.",
),
io.Boolean.Input(
"bypass",
default=False,
tooltip="When enabled, applies LoRA in bypass mode without modifying base model weights. Useful for training and when model weights are offloaded.",
),
],
outputs=[
io.Model.Output(
@@ -1290,18 +1282,13 @@ class LoraModelLoader(io.ComfyNode):
)
@classmethod
def execute(cls, model, lora, strength_model, bypass=False):
def execute(cls, model, lora, strength_model):
if strength_model == 0:
return io.NodeOutput(model)
if bypass:
model_lora, _ = comfy.sd.load_bypass_lora_for_models(
model, None, lora, strength_model, 0
)
else:
model_lora, _ = comfy.sd.load_lora_for_models(
model, None, lora, strength_model, 0
)
model_lora, _ = comfy.sd.load_lora_for_models(
model, None, lora, strength_model, 0
)
return io.NodeOutput(model_lora)
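Outside the node graph, the same application step looks roughly like this; the file path and the already-loaded model object are placeholders, while comfy.sd.load_lora_for_models is the call used in execute() above and comfy.utils.load_torch_file is how ComfyUI typically reads a .safetensors state dict:

import comfy.utils
import comfy.sd

# `model` is a ModelPatcher obtained elsewhere (e.g. from a checkpoint loader).
# Patch only the diffusion model: clip=None and strength_clip=0, matching execute().
lora = comfy.utils.load_torch_file("loras/trained_lora.safetensors", safe_load=True)
model_lora, _ = comfy.sd.load_lora_for_models(model, None, lora, 0.8, 0)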