add mmap tensor
commit e9e1d2f0e8
parent 4ac827d564
@@ -27,6 +27,7 @@ import uuid
 from typing import Callable, Optional
 
 import torch
+import tensordict
 
 import comfy.float
 import comfy.hooks
@@ -37,6 +38,9 @@ import comfy.utils
 from comfy.comfy_types import UnetWrapperFunction
 from comfy.patcher_extension import CallbacksMP, PatcherInjection, WrappersMP
 
+def to_mmap(t: torch.Tensor) -> tensordict.MemoryMappedTensor:
+    return tensordict.MemoryMappedTensor.from_tensor(t)
+
 
 def string_to_seed(data):
     crc = 0xFFFFFFFF
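For context, a minimal usage sketch of the new to_mmap helper (assumes the tensordict package is installed; the shapes and variable names below are illustrative, not from the commit). MemoryMappedTensor.from_tensor copies the input into memory-mapped storage, and the result is still a torch.Tensor subclass, so code that consumes the weights keeps working:

    import torch
    import tensordict

    weight = torch.randn(1024, 1024)

    # Copy the data into memory-mapped storage; values are preserved and the
    # result can be used wherever a regular tensor is expected.
    mmap_weight = tensordict.MemoryMappedTensor.from_tensor(weight)

    assert isinstance(mmap_weight, torch.Tensor)
    assert torch.equal(mmap_weight, weight)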
@@ -784,9 +788,37 @@ class ModelPatcher:
         self.backup.clear()
 
         if device_to is not None:
-            # TODO(sf): to mmap
-            # self.model is what module?
-            self.model.to(device_to)
+            # Temporarily register to_mmap method to the model
+            # Reference: https://github.com/pytorch/pytorch/blob/0fabc3ba44823f257e70ce397d989c8de5e362c1/torch/nn/modules/module.py#L1244
+            def _to_mmap_method(self):
+                """Convert all parameters and buffers to memory-mapped tensors
+
+                This method mimics PyTorch's Module.to() behavior but converts
+                tensors to memory-mapped format instead.
+                """
+                import pdb; pdb.set_trace()
+                logging.info(f"model {self.model.__class__.__name__} is calling to_mmap method")
+                def convert_fn(t):
+                    if isinstance(t, torch.Tensor) and not isinstance(t, torch.nn.Parameter):
+                        return to_mmap(t)
+                    elif isinstance(t, torch.nn.Parameter):
+                        # For parameters, convert the data and wrap back in Parameter
+                        param_mmap = to_mmap(t.data)
+                        return torch.nn.Parameter(param_mmap, requires_grad=t.requires_grad)
+                    return t
+
+                return self._apply(convert_fn)
+
+            # Bind the method to the model instance
+            import types
+            self.model.to_mmap = types.MethodType(_to_mmap_method, self.model)
+
+            # Call the to_mmap method
+            self.model.to_mmap()
+
+            # Optionally clean up the temporary method
+            # delattr(self.model, 'to_mmap')
+
             self.model.device = device_to
             self.model.model_loaded_weight_memory = 0
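The hunk above binds _to_mmap_method to the model instance with types.MethodType, so inside the function self is self.model, and self._apply(convert_fn) walks every parameter and buffer of that module tree; PyTorch's own Module.to() drives its device/dtype conversions through this same _apply hook. A minimal standalone sketch of that idea follows, assuming tensordict is installed and reusing the to_mmap helper from the earlier hunk; module_to_mmap is an illustrative name, not part of the commit:

    import torch
    import tensordict


    def to_mmap(t: torch.Tensor) -> tensordict.MemoryMappedTensor:
        return tensordict.MemoryMappedTensor.from_tensor(t)


    def module_to_mmap(module: torch.nn.Module) -> torch.nn.Module:
        """Convert every parameter and buffer of `module` to memory-mapped storage."""

        def convert_fn(t: torch.Tensor) -> torch.Tensor:
            # _apply hands us parameters and buffers alike; returning a plain
            # tensor is enough, because _apply keeps the result registered as
            # a Parameter wherever a Parameter was registered before.
            return to_mmap(t)

        # _apply is the private hook that Module.to()/.cuda() use internally;
        # it recurses into submodules.
        return module._apply(convert_fn)

The committed version instead keeps the conversion inline in ModelPatcher so that it runs in place of the removed self.model.to(device_to) call.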