From 7a9268185cb6456890f6fe61bcc380b5cb21f614 Mon Sep 17 00:00:00 2001 From: WAS Date: Fri, 5 May 2023 18:06:54 -0700 Subject: [PATCH 1/3] Update README.md Add quick search explanation --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 3b3824714..bfa8904df 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,7 @@ Workflow examples can be found on the [Examples page](https://comfyanonymous.git | Q | Toggle visibility of the queue | | H | Toggle visibility of history | | R | Refresh graph | +| Double-Click LMB | Open node quick search palette | Ctrl can also be replaced with Cmd instead for MacOS users From 678f933d382641933920e84414fe36f89d1da5a3 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 6 May 2023 19:00:49 -0400 Subject: [PATCH 2/3] maximum_batch_area for xformers. Remove useless code. --- comfy/model_management.py | 7 ++++++- nodes.py | 4 +--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index 7070912df..b0640d674 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -312,7 +312,12 @@ def maximum_batch_area(): return 0 memory_free = get_free_memory() / (1024 * 1024) - area = ((memory_free - 1024) * 0.9) / (0.6) + if xformers_enabled(): + #TODO: this needs to be tweaked + area = 50 * memory_free + else: + #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future + area = ((memory_free - 1024) * 0.9) / (0.6) return int(max(area, 0)) def cpu_mode(): diff --git a/nodes.py b/nodes.py index c2bc36855..ca0769ba7 100644 --- a/nodes.py +++ b/nodes.py @@ -105,15 +105,13 @@ class ConditioningSetArea: CATEGORY = "conditioning" - def append(self, conditioning, width, height, x, y, strength, min_sigma=0.0, max_sigma=99.0): + def append(self, conditioning, width, height, x, y, strength): c = [] for t in conditioning: n = [t[0], t[1].copy()] n[1]['area'] = (height // 8, width // 8, y // 
8, x // 8) n[1]['strength'] = strength n[1]['set_area_to_bounds'] = False - n[1]['min_sigma'] = min_sigma - n[1]['max_sigma'] = max_sigma c.append(n) return (c, ) From 6fc4917634d457c07eb8b676da4fa88e0ef4704b Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Sat, 6 May 2023 19:58:54 -0400 Subject: [PATCH 3/3] Make maximum_batch_area take into account pytorch 2.0 attention function. More conservative xformers maximum_batch_area. --- comfy/model_management.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/comfy/model_management.py b/comfy/model_management.py index b0640d674..39df8d9a7 100644 --- a/comfy/model_management.py +++ b/comfy/model_management.py @@ -275,8 +275,17 @@ def xformers_enabled_vae(): return XFORMERS_ENABLED_VAE def pytorch_attention_enabled(): + global ENABLE_PYTORCH_ATTENTION return ENABLE_PYTORCH_ATTENTION +def pytorch_attention_flash_attention(): + global ENABLE_PYTORCH_ATTENTION + if ENABLE_PYTORCH_ATTENTION: + #TODO: more reliable way of checking for flash attention? + if torch.version.cuda: #pytorch flash attention only works on Nvidia + return True + return False + def get_free_memory(dev=None, torch_free_too=False): global xpu_available global directml_enabled @@ -312,9 +321,9 @@ def maximum_batch_area(): return 0 memory_free = get_free_memory() / (1024 * 1024) - if xformers_enabled(): + if xformers_enabled() or pytorch_attention_flash_attention(): #TODO: this needs to be tweaked - area = 50 * memory_free + area = 20 * memory_free else: #TODO: this formula is because AMD sucks and has memory management issues which might be fixed in the future area = ((memory_free - 1024) * 0.9) / (0.6)