mm: Add free_ram()
Add the free_ram() API and a means to install implementations of the freer (i.e. the RAM cache).
commit 62a2622591
parent 68053b1180
@@ -445,6 +445,20 @@ try:
 except:
     logging.warning("Could not pick default device.")
 
+current_ram_listeners = set()
+
+def register_ram_listener(listener):
+    current_ram_listeners.add(listener)
+
+def unregister_ram_listener(listener):
+    current_ram_listeners.discard(listener)
+
+def free_ram(extra_ram=0, state_dict={}):
+    for tensor in state_dict.values():
+        if isinstance(tensor, torch.Tensor):
+            extra_ram += tensor.numel() * tensor.element_size()
+    for listener in current_ram_listeners:
+        listener.free_ram(extra_ram)
+
 current_loaded_models = []
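For review context, here is a minimal sketch of what a "freer" installed through this API might look like: an object that exposes a free_ram(extra_ram) method and is handed to register_ram_listener(). The class name, storage layout, and oldest-first eviction policy below are illustrative assumptions, not part of this commit.

import comfy.model_management

class SimpleRAMCache:
    # Hypothetical RAM cache acting as a "freer": it keeps tensors in host
    # memory and evicts them oldest-first when asked to free RAM.
    def __init__(self):
        self._entries = {}   # key -> tensor held in host RAM
        self._sizes = {}     # key -> size of that tensor in bytes

    def put(self, key, tensor):
        self._entries[key] = tensor
        self._sizes[key] = tensor.numel() * tensor.element_size()

    def free_ram(self, extra_ram):
        # Called by comfy.model_management.free_ram(): evict entries until
        # roughly extra_ram bytes have been released.
        freed = 0
        for key in list(self._entries):
            if freed >= extra_ram:
                break
            freed += self._sizes.pop(key)
            del self._entries[key]

cache = SimpleRAMCache()
comfy.model_management.register_ram_listener(cache)

Because listeners live in a plain set, a cache that is discarded must also be removed with unregister_ram_listener(); that is exactly the bookkeeping the PromptExecutor.reset() change in the next hunk performs.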
@@ -613,13 +613,21 @@ async def execute(server, dynprompt, caches, current_item, extra_data, executed,
 class PromptExecutor:
     def __init__(self, server, cache_type=False, cache_args=None):
+        self.caches = None
         self.cache_args = cache_args
         self.cache_type = cache_type
         self.server = server
         self.reset()
 
     def reset(self):
+        if self.caches is not None:
+            for cache in self.caches.all:
+                comfy.model_management.unregister_ram_listener(cache)
+
         self.caches = CacheSet(cache_type=self.cache_type, cache_args=self.cache_args)
+
+        for cache in self.caches.all:
+            comfy.model_management.register_ram_listener(cache)
         self.status_messages = []
         self.success = True
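On the calling side, a hedged usage sketch (not a call site from this commit): code that is about to occupy a large amount of host RAM can ask the registered listeners, i.e. the executor's caches, to make room first. The 2 GiB figure and dummy_sd are placeholders.

import torch
import comfy.model_management

# Ask listeners to release roughly 2 GiB of host RAM before a large allocation.
comfy.model_management.free_ram(extra_ram=2 * 1024**3)

# Or let free_ram() derive the requirement from a state dict: the byte sizes of
# its tensors are summed, added to extra_ram, and passed to every listener's
# free_ram() hook.
dummy_sd = {"weight": torch.zeros(1024, 1024)}  # placeholder state dict
comfy.model_management.free_ram(state_dict=dummy_sd)

Note the symmetry this hunk adds to PromptExecutor.reset(): caches from the previous CacheSet are unregistered before a new CacheSet is built and its caches registered, so listeners do not accumulate across resets.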