From 5190aa284d501182196672865703a0c659fe20c1 Mon Sep 17 00:00:00 2001 From: melMass Date: Fri, 21 Jul 2023 13:19:05 +0200 Subject: [PATCH 01/13] =?UTF-8?q?fix:=20=E2=9A=A1=EF=B8=8F=20small=20type?= =?UTF-8?q?=20fix?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit getCustomWidgets expects a plain record and not an array of records --- web/types/comfy.d.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/web/types/comfy.d.ts b/web/types/comfy.d.ts index 8444e13a8..f7129b555 100644 --- a/web/types/comfy.d.ts +++ b/web/types/comfy.d.ts @@ -30,9 +30,7 @@ export interface ComfyExtension { getCustomWidgets( app: ComfyApp ): Promise< - Array< - Record { widget?: IWidget; minWidth?: number; minHeight?: number }> - > + Record { widget?: IWidget; minWidth?: number; minHeight?: number }> >; /** * Allows the extension to add additional handling to the node before it is registered with LGraph From 0240946ecff9527a1c39f705ae9d2324612f4cf7 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Jul 2023 09:25:02 -0400 Subject: [PATCH 02/13] Add a way to set which range of timesteps the cond gets applied to. --- comfy/samplers.py | 30 ++++++++++++++++++++++++++++++ nodes.py | 23 +++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/comfy/samplers.py b/comfy/samplers.py index 9eee25a92..5b01d48fb 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -17,6 +17,14 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con def get_area_and_mult(cond, x_in, cond_concat_in, timestep_in): area = (x_in.shape[2], x_in.shape[3], 0, 0) strength = 1.0 + if 'timestep_start' in cond[1]: + timestep_start = cond[1]['timestep_start'] + if timestep_in > timestep_start: + return None + if 'timestep_end' in cond[1]: + timestep_end = cond[1]['timestep_end'] + if timestep_in < timestep_end: + return None if 'area' in cond[1]: area = cond[1]['area'] if 'strength' in cond[1]: @@ -428,6 +436,25 @@ def create_cond_with_same_area_if_none(conds, c): n = c[1].copy() conds += [[smallest[0], n]] +def calculate_start_end_timesteps(model, conds): + for t in range(len(conds)): + x = conds[t] + + timestep_start = None + timestep_end = None + if 'start_percent' in x[1]: + timestep_start = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['start_percent'] * 999.0))) + if 'end_percent' in x[1]: + timestep_end = model.sigma_to_t(model.t_to_sigma(torch.tensor(x[1]['end_percent'] * 999.0))) + + if (timestep_start is not None) or (timestep_end is not None): + n = x[1].copy() + if (timestep_start is not None): + n['timestep_start'] = timestep_start + if (timestep_end is not None): + n['timestep_end'] = timestep_end + conds[t] = [x[0], n] + def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] cond_other = [] @@ -571,6 +598,9 @@ class KSampler: resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device) resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device) + calculate_start_end_timesteps(self.model_wrap, negative) + calculate_start_end_timesteps(self.model_wrap, positive) + #make sure each cond area has an opposite one with the same area for c in positive: create_cond_with_same_area_if_none(negative, c) diff --git a/nodes.py b/nodes.py index a1c4b8437..db39c0ced 100644 --- a/nodes.py +++ b/nodes.py @@ -204,6 +204,28 @@ class ConditioningZeroOut: c.append(n) return (c, ) +class ConditioningSetTimestepRange: + @classmethod + def INPUT_TYPES(s): + return {"required": 
{"conditioning": ("CONDITIONING", ), + "start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}) + }} + RETURN_TYPES = ("CONDITIONING",) + FUNCTION = "set_range" + + CATEGORY = "advanced/conditioning" + + def set_range(self, conditioning, start, end): + c = [] + for t in conditioning: + d = t[1].copy() + d['start_percent'] = start + d['end_percent'] = end + n = [t[0], d] + c.append(n) + return (c, ) + class VAEDecode: @classmethod def INPUT_TYPES(s): @@ -1444,6 +1466,7 @@ NODE_CLASS_MAPPINGS = { "SaveLatent": SaveLatent, "ConditioningZeroOut": ConditioningZeroOut, + "ConditioningSetTimestepRange": ConditioningSetTimestepRange, } NODE_DISPLAY_NAME_MAPPINGS = { From d191c4f9edee21f675d00d9d6feb02a32d63942e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Jul 2023 13:26:07 -0400 Subject: [PATCH 03/13] Add a ControlNetApplyAdvanced node. The controlnet can be applied to the positive or negative prompt only by connecting it correctly. --- comfy/samplers.py | 2 +- nodes.py | 49 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 5b01d48fb..504d7b055 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -607,7 +607,7 @@ class KSampler: for c in negative: create_cond_with_same_area_if_none(positive, c) - apply_empty_x_to_equal_area(positive, negative, 'control', lambda cond_cnets, x: cond_cnets[x]) + apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) if self.model.is_adm(): diff --git a/nodes.py b/nodes.py index db39c0ced..472f0039f 100644 --- a/nodes.py +++ b/nodes.py @@ -602,9 +602,56 @@ class ControlNetApply: if 'control' in t[1]: c_net.set_previous_controlnet(t[1]['control']) n[1]['control'] = c_net + n[1]['control_apply_to_uncond'] = True c.append(n) return (c, ) + +class ControlNetApplyAdvanced: + @classmethod + def INPUT_TYPES(s): + return {"required": {"positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + "control_net": ("CONTROL_NET", ), + "image": ("IMAGE", ), + "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + }} + + RETURN_TYPES = ("CONDITIONING","CONDITIONING") + RETURN_NAMES = ("positive", "negative") + FUNCTION = "apply_controlnet" + + CATEGORY = "conditioning" + + def apply_controlnet(self, positive, negative, control_net, image, strength): + if strength == 0: + return (positive, negative) + + control_hint = image.movedim(-1,1) + cnets = {} + + out = [] + for conditioning in [positive, negative]: + c = [] + for t in conditioning: + d = t[1].copy() + + prev_cnet = d.get('control', None) + if prev_cnet in cnets: + c_net = cnets[prev_cnet] + else: + c_net = control_net.copy().set_cond_hint(control_hint, strength) + c_net.set_previous_controlnet(prev_cnet) + cnets[prev_cnet] = c_net + + d['control'] = c_net + d['control_apply_to_uncond'] = False + n = [t[0], d] + c.append(n) + out.append(c) + return (out[0], out[1]) + + class UNETLoader: @classmethod def INPUT_TYPES(s): @@ -1449,6 +1496,7 @@ NODE_CLASS_MAPPINGS = { "StyleModelApply": StyleModelApply, "unCLIPConditioning": unCLIPConditioning, "ControlNetApply": ControlNetApply, + "ControlNetApplyAdvanced": ControlNetApplyAdvanced, "ControlNetLoader": ControlNetLoader, 
"DiffControlNetLoader": DiffControlNetLoader, "StyleModelLoader": StyleModelLoader, @@ -1495,6 +1543,7 @@ NODE_DISPLAY_NAME_MAPPINGS = { "ConditioningSetArea": "Conditioning (Set Area)", "ConditioningSetMask": "Conditioning (Set Mask)", "ControlNetApply": "Apply ControlNet", + "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)", # Latent "VAEEncodeForInpaint": "VAE Encode (for Inpainting)", "SetLatentNoiseMask": "Set Latent Noise Mask", From 7ff14b62f87559938ab9e468c4e232201b8553ef Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Jul 2023 17:50:49 -0400 Subject: [PATCH 04/13] ControlNetApplyAdvanced can now define when controlnet gets applied. --- comfy/samplers.py | 12 +++++ comfy/sd.py | 122 ++++++++++++++++++++++++---------------------- nodes.py | 6 ++- 3 files changed, 79 insertions(+), 61 deletions(-) diff --git a/comfy/samplers.py b/comfy/samplers.py index 504d7b055..e059374d3 100644 --- a/comfy/samplers.py +++ b/comfy/samplers.py @@ -455,6 +455,16 @@ def calculate_start_end_timesteps(model, conds): n['timestep_end'] = timestep_end conds[t] = [x[0], n] +def pre_run_control(model, conds): + for t in range(len(conds)): + x = conds[t] + + timestep_start = None + timestep_end = None + percent_to_timestep_function = lambda a: model.sigma_to_t(model.t_to_sigma(torch.tensor(a) * 999.0)) + if 'control' in x[1]: + x[1]['control'].pre_run(model.inner_model, percent_to_timestep_function) + def apply_empty_x_to_equal_area(conds, uncond, name, uncond_fill_func): cond_cnets = [] cond_other = [] @@ -607,6 +617,8 @@ class KSampler: for c in negative: create_cond_with_same_area_if_none(positive, c) + pre_run_control(self.model_wrap, negative + positive) + apply_empty_x_to_equal_area(list(filter(lambda c: c[1].get('control_apply_to_uncond', False) == True, positive)), negative, 'control', lambda cond_cnets, x: cond_cnets[x]) apply_empty_x_to_equal_area(positive, negative, 'gligen', lambda cond_cnets, x: cond_cnets[x]) diff --git a/comfy/sd.py b/comfy/sd.py index 1f364dd1f..aaeed0561 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -673,16 +673,58 @@ def broadcast_image_to(tensor, target_batch_size, batched_number): else: return torch.cat([tensor] * batched_number, dim=0) -class ControlNet: - def __init__(self, control_model, global_average_pooling=False, device=None): - self.control_model = control_model +class ControlBase: + def __init__(self, device=None): self.cond_hint_original = None self.cond_hint = None self.strength = 1.0 + self.timestep_percent_range = (1.0, 0.0) + self.timestep_range = None + if device is None: device = model_management.get_torch_device() self.device = device self.previous_controlnet = None + + def set_cond_hint(self, cond_hint, strength=1.0, timestep_percent_range=(1.0, 0.0)): + self.cond_hint_original = cond_hint + self.strength = strength + self.timestep_percent_range = timestep_percent_range + return self + + def pre_run(self, model, percent_to_timestep_function): + self.timestep_range = (percent_to_timestep_function(self.timestep_percent_range[0]), percent_to_timestep_function(self.timestep_percent_range[1])) + if self.previous_controlnet is not None: + self.previous_controlnet.pre_run(model, percent_to_timestep_function) + + def set_previous_controlnet(self, controlnet): + self.previous_controlnet = controlnet + return self + + def cleanup(self): + if self.previous_controlnet is not None: + self.previous_controlnet.cleanup() + if self.cond_hint is not None: + del self.cond_hint + self.cond_hint = None + self.timestep_range = None + + def get_models(self): 
+ out = [] + if self.previous_controlnet is not None: + out += self.previous_controlnet.get_models() + out.append(self.control_model) + return out + + def copy_to(self, c): + c.cond_hint_original = self.cond_hint_original + c.strength = self.strength + c.timestep_percent_range = self.timestep_percent_range + +class ControlNet(ControlBase): + def __init__(self, control_model, global_average_pooling=False, device=None): + super().__init__(device) + self.control_model = control_model self.global_average_pooling = global_average_pooling def get_control(self, x_noisy, t, cond, batched_number): @@ -690,6 +732,13 @@ class ControlNet: if self.previous_controlnet is not None: control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number) + if self.timestep_range is not None: + if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]: + if control_prev is not None: + return control_prev + else: + return {} + output_dtype = x_noisy.dtype if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: if self.cond_hint is not None: @@ -737,35 +786,11 @@ class ControlNet: out['input'] = control_prev['input'] return out - def set_cond_hint(self, cond_hint, strength=1.0): - self.cond_hint_original = cond_hint - self.strength = strength - return self - - def set_previous_controlnet(self, controlnet): - self.previous_controlnet = controlnet - return self - - def cleanup(self): - if self.previous_controlnet is not None: - self.previous_controlnet.cleanup() - if self.cond_hint is not None: - del self.cond_hint - self.cond_hint = None - def copy(self): c = ControlNet(self.control_model, global_average_pooling=self.global_average_pooling) - c.cond_hint_original = self.cond_hint_original - c.strength = self.strength + self.copy_to(c) return c - def get_models(self): - out = [] - if self.previous_controlnet is not None: - out += self.previous_controlnet.get_models() - out.append(self.control_model) - return out - def load_controlnet(ckpt_path, model=None): controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True) @@ -870,24 +895,25 @@ def load_controlnet(ckpt_path, model=None): control = ControlNet(control_model, global_average_pooling=global_average_pooling) return control -class T2IAdapter: +class T2IAdapter(ControlBase): def __init__(self, t2i_model, channels_in, device=None): + super().__init__(device) self.t2i_model = t2i_model self.channels_in = channels_in - self.strength = 1.0 - if device is None: - device = model_management.get_torch_device() - self.device = device - self.previous_controlnet = None self.control_input = None - self.cond_hint_original = None - self.cond_hint = None def get_control(self, x_noisy, t, cond, batched_number): control_prev = None if self.previous_controlnet is not None: control_prev = self.previous_controlnet.get_control(x_noisy, t, cond, batched_number) + if self.timestep_range is not None: + if t[0] > self.timestep_range[0] or t[0] < self.timestep_range[1]: + if control_prev is not None: + return control_prev + else: + return {} + if self.cond_hint is None or x_noisy.shape[2] * 8 != self.cond_hint.shape[2] or x_noisy.shape[3] * 8 != self.cond_hint.shape[3]: if self.cond_hint is not None: del self.cond_hint @@ -932,33 +958,11 @@ class T2IAdapter: out['output'] = control_prev['output'] return out - def set_cond_hint(self, cond_hint, strength=1.0): - self.cond_hint_original = cond_hint - self.strength = strength - return self - - def set_previous_controlnet(self, 
controlnet): - self.previous_controlnet = controlnet - return self - def copy(self): c = T2IAdapter(self.t2i_model, self.channels_in) - c.cond_hint_original = self.cond_hint_original - c.strength = self.strength + self.copy_to(c) return c - def cleanup(self): - if self.previous_controlnet is not None: - self.previous_controlnet.cleanup() - if self.cond_hint is not None: - del self.cond_hint - self.cond_hint = None - - def get_models(self): - out = [] - if self.previous_controlnet is not None: - out += self.previous_controlnet.get_models() - return out def load_t2i_adapter(t2i_data): keys = t2i_data.keys() diff --git a/nodes.py b/nodes.py index 472f0039f..ac2ca1197 100644 --- a/nodes.py +++ b/nodes.py @@ -615,6 +615,8 @@ class ControlNetApplyAdvanced: "control_net": ("CONTROL_NET", ), "image": ("IMAGE", ), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}) }} RETURN_TYPES = ("CONDITIONING","CONDITIONING") @@ -623,7 +625,7 @@ class ControlNetApplyAdvanced: CATEGORY = "conditioning" - def apply_controlnet(self, positive, negative, control_net, image, strength): + def apply_controlnet(self, positive, negative, control_net, image, strength, start, end): if strength == 0: return (positive, negative) @@ -640,7 +642,7 @@ class ControlNetApplyAdvanced: if prev_cnet in cnets: c_net = cnets[prev_cnet] else: - c_net = control_net.copy().set_cond_hint(control_hint, strength) + c_net = control_net.copy().set_cond_hint(control_hint, strength, (start, end)) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net From 5f75d784a1773d754b8327eea7d7ece6f24a1ccc Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Jul 2023 18:29:00 -0400 Subject: [PATCH 05/13] Start is now 0.0 and end is now 1.0 for the timestep ranges. 
--- comfy/sd.py | 7 ++++++- nodes.py | 16 ++++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/comfy/sd.py b/comfy/sd.py index aaeed0561..64ab7cc63 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -713,7 +713,6 @@ class ControlBase: out = [] if self.previous_controlnet is not None: out += self.previous_controlnet.get_models() - out.append(self.control_model) return out def copy_to(self, c): @@ -791,6 +790,12 @@ class ControlNet(ControlBase): self.copy_to(c) return c + def get_models(self): + out = super().get_models() + out.append(self.control_model) + return out + + def load_controlnet(ckpt_path, model=None): controlnet_data = utils.load_torch_file(ckpt_path, safe_load=True) diff --git a/nodes.py b/nodes.py index ac2ca1197..b0d05f0d7 100644 --- a/nodes.py +++ b/nodes.py @@ -208,8 +208,8 @@ class ConditioningSetTimestepRange: @classmethod def INPUT_TYPES(s): return {"required": {"conditioning": ("CONDITIONING", ), - "start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), - "end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}) + "start": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) }} RETURN_TYPES = ("CONDITIONING",) FUNCTION = "set_range" @@ -220,8 +220,8 @@ class ConditioningSetTimestepRange: c = [] for t in conditioning: d = t[1].copy() - d['start_percent'] = start - d['end_percent'] = end + d['start_percent'] = 1.0 - start + d['end_percent'] = 1.0 - end n = [t[0], d] c.append(n) return (c, ) @@ -615,8 +615,8 @@ class ControlNetApplyAdvanced: "control_net": ("CONTROL_NET", ), "image": ("IMAGE", ), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "start": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), - "end": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}) + "start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), + "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}) }} RETURN_TYPES = ("CONDITIONING","CONDITIONING") @@ -625,7 +625,7 @@ class ControlNetApplyAdvanced: CATEGORY = "conditioning" - def apply_controlnet(self, positive, negative, control_net, image, strength, start, end): + def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent): if strength == 0: return (positive, negative) @@ -642,7 +642,7 @@ class ControlNetApplyAdvanced: if prev_cnet in cnets: c_net = cnets[prev_cnet] else: - c_net = control_net.copy().set_cond_hint(control_hint, strength, (start, end)) + c_net = control_net.copy().set_cond_hint(control_hint, strength, (1.0 - start_percent, 1.0 - end_percent)) c_net.set_previous_controlnet(prev_cnet) cnets[prev_cnet] = c_net From a51f33ee49207d6888642d61cf8c5409b8d8d383 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Mon, 24 Jul 2023 19:47:32 -0400 Subject: [PATCH 06/13] Use bigger tiles when upscaling with model and fallback on OOM. 
--- comfy_extras/nodes_upscale_model.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/comfy_extras/nodes_upscale_model.py b/comfy_extras/nodes_upscale_model.py index f9252ea0b..abd182e6e 100644 --- a/comfy_extras/nodes_upscale_model.py +++ b/comfy_extras/nodes_upscale_model.py @@ -37,12 +37,23 @@ class ImageUpscaleWithModel: device = model_management.get_torch_device() upscale_model.to(device) in_img = image.movedim(-1,-3).to(device) + free_memory = model_management.get_free_memory(device) + + tile = 512 + overlap = 32 + + oom = True + while oom: + try: + steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap) + pbar = comfy.utils.ProgressBar(steps) + s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar) + oom = False + except model_management.OOM_EXCEPTION as e: + tile //= 2 + if tile < 128: + raise e - tile = 128 + 64 - overlap = 8 - steps = in_img.shape[0] * comfy.utils.get_tiled_scale_steps(in_img.shape[3], in_img.shape[2], tile_x=tile, tile_y=tile, overlap=overlap) - pbar = comfy.utils.ProgressBar(steps) - s = comfy.utils.tiled_scale(in_img, lambda a: upscale_model(a), tile_x=tile, tile_y=tile, overlap=overlap, upscale_amount=upscale_model.scale, pbar=pbar) upscale_model.cpu() s = torch.clamp(s.movedim(-3,-1), min=0, max=1.0) return (s,) From 7c0a5a3e0ee7afa69db8f6f0d47e409c3f0c43d1 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Jul 2023 00:09:01 -0400 Subject: [PATCH 07/13] Disable cuda malloc on a bunch of quadro cards. --- cuda_malloc.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cuda_malloc.py b/cuda_malloc.py index 250d827da..a808b2071 100644 --- a/cuda_malloc.py +++ b/cuda_malloc.py @@ -37,7 +37,11 @@ def get_gpu_names(): return set() def cuda_malloc_supported(): - blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M", "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745"} + blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M", + "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620", + "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000", + "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000"} + try: names = get_gpu_names() except: From 4f9b6f39d12082c1ee310b232ef093d1f048af64 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Jul 2023 00:45:20 -0400 Subject: [PATCH 08/13] Fix potential issue with Save Checkpoint. 
--- comfy/supported_models.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/comfy/supported_models.py b/comfy/supported_models.py index 915214081..b1c01fe87 100644 --- a/comfy/supported_models.py +++ b/comfy/supported_models.py @@ -126,7 +126,8 @@ class SDXLRefiner(supported_models_base.BASE): def process_clip_state_dict_for_saving(self, state_dict): replace_prefix = {} state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g") - state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids") + if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g: + state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids") replace_prefix["clip_g"] = "conditioner.embedders.0.model" state_dict_g = supported_models_base.state_dict_prefix_replace(state_dict_g, replace_prefix) return state_dict_g @@ -171,7 +172,8 @@ class SDXL(supported_models_base.BASE): replace_prefix = {} keys_to_replace = {} state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g") - state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids") + if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g: + state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids") for k in state_dict: if k.startswith("clip_l"): state_dict_g[k] = state_dict[k] From 315ba30c81ce73f94b44e74b6c1dbc8951c8720f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Jul 2023 15:44:49 -0400 Subject: [PATCH 09/13] Update nightly ROCm pytorch command in readme to 5.6 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5e32a74f3..ad85d3d49 100644 --- a/README.md +++ b/README.md @@ -93,8 +93,8 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2``` -This is the command to install the nightly with ROCm 5.5 that supports the 7000 series and might have some performance improvements: -```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.5 -r requirements.txt``` +This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements: +```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6 -r requirements.txt``` ### NVIDIA From 727588d076e45f7130af0b4fe61cb30be5a5328c Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Jul 2023 16:39:15 -0400 Subject: [PATCH 10/13] Fix some new loras. --- comfy/sd.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/comfy/sd.py b/comfy/sd.py index 64ab7cc63..70701ab6b 100644 --- a/comfy/sd.py +++ b/comfy/sd.py @@ -170,6 +170,8 @@ def model_lora_keys_clip(model, key_map={}): if k in sdk: lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c]) key_map[lora_key] = k + lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c]) + key_map[lora_key] = k k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c) if k in sdk: From 5e3ac1928aec03009e22a3d79e1ba9b3f16a738e Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Tue, 25 Jul 2023 22:02:26 -0400 Subject: [PATCH 11/13] Implement modelspec metadata in CheckpointSave for SDXL and refiner. 
--- comfy_extras/nodes_model_merging.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/comfy_extras/nodes_model_merging.py b/comfy_extras/nodes_model_merging.py index 95c4cfece..6146c4500 100644 --- a/comfy_extras/nodes_model_merging.py +++ b/comfy_extras/nodes_model_merging.py @@ -1,5 +1,7 @@ import comfy.sd import comfy.utils +import comfy.model_base + import folder_paths import json import os @@ -100,6 +102,31 @@ class CheckpointSave: prompt_info = json.dumps(prompt) metadata = {"prompt": prompt_info} + + + enable_modelspec = True + if isinstance(model.model, comfy.model_base.SDXL): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base" + elif isinstance(model.model, comfy.model_base.SDXLRefiner): + metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner" + else: + enable_modelspec = False + + if enable_modelspec: + metadata["modelspec.sai_model_spec"] = "1.0.0" + metadata["modelspec.implementation"] = "sgm" + metadata["modelspec.title"] = "{} {}".format(filename, counter) + + #TODO: + # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512", + # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h", + # "v2-inpainting" + + if model.model.model_type == comfy.model_base.ModelType.EPS: + metadata["modelspec.predict_key"] = "epsilon" + elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION: + metadata["modelspec.predict_key"] = "v" + if extra_pnginfo is not None: for x in extra_pnginfo: metadata[x] = json.dumps(extra_pnginfo[x]) From 5379051d1629c1c0849e8292a7bb8717d0bfe749 Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Jul 2023 18:26:39 -0400 Subject: [PATCH 12/13] Fix diffusers VAE loading. --- comfy/diffusers_convert.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/comfy/diffusers_convert.py b/comfy/diffusers_convert.py index 9688cbd52..a9eb9302f 100644 --- a/comfy/diffusers_convert.py +++ b/comfy/diffusers_convert.py @@ -148,6 +148,10 @@ vae_conversion_map_attn = [ ("q.", "query."), ("k.", "key."), ("v.", "value."), + ("q.", "to_q."), + ("k.", "to_k."), + ("v.", "to_v."), + ("proj_out.", "to_out.0."), ("proj_out.", "proj_attn."), ] From 4ab75d9cb86f55b272d27c17ad82af5d52be390f Mon Sep 17 00:00:00 2001 From: comfyanonymous Date: Wed, 26 Jul 2023 21:50:19 -0400 Subject: [PATCH 13/13] Update colab notebook with SDXL links. 
--- notebooks/comfyui_colab.ipynb | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/notebooks/comfyui_colab.ipynb b/notebooks/comfyui_colab.ipynb index 61c277bf6..1bb90f7d0 100644 --- a/notebooks/comfyui_colab.ipynb +++ b/notebooks/comfyui_colab.ipynb @@ -69,6 +69,13 @@ "source": [ "# Checkpoints\n", "\n", + "### SDXL\n", + "### I recommend these workflow examples: https://comfyanonymous.github.io/ComfyUI_examples/sdxl/\n", + "\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -P ./models/checkpoints/\n", + "\n", + "\n", "# SD1.5\n", "!wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P ./models/checkpoints/\n", "\n", @@ -83,7 +90,7 @@ "#!wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P ./models/checkpoints/\n", "\n", "# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n", - "#!wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta2/resolve/main/checkpoints/wd-1-5-beta2-fp16.safetensors -P ./models/checkpoints/\n", + "#!wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta3/resolve/main/wd-illusion-fp16.safetensors -P ./models/checkpoints/\n", "\n", "\n", "# unCLIP models\n", @@ -100,6 +107,7 @@ "# Loras\n", "#!wget -c https://civitai.com/api/download/models/10350 -O ./models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n", "#!wget -c https://civitai.com/api/download/models/10638 -O ./models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n", + "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors -P ./models/loras/ #SDXL offset noise lora\n", "\n", "\n", "# T2I-Adapter\n",