diff --git a/comfy/model_management.py b/comfy/model_management.py
index 106d8eec8..ab7846d59 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -535,14 +535,11 @@ def load_models_gpu(models, memory_required=0, force_patch_weights=False, minimu
             vram_set_state = vram_state
         lowvram_model_memory = 0
         if lowvram_available and (vram_set_state == VRAMState.LOW_VRAM or vram_set_state == VRAMState.NORMAL_VRAM) and not force_full_load:
-            model_size = loaded_model.model_memory_required(torch_dev)
             loaded_memory = loaded_model.model_loaded_memory()
             current_free_mem = get_free_memory(torch_dev) + loaded_memory
 
             lowvram_model_memory = max(64 * 1024 * 1024, (current_free_mem - minimum_memory_required), min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO, current_free_mem - minimum_inference_memory()))
             lowvram_model_memory = max(0.1, lowvram_model_memory - loaded_memory)
-            if model_size <= lowvram_model_memory: #only switch to lowvram if really necessary
-                lowvram_model_memory = 0
 
         if vram_set_state == VRAMState.NO_VRAM:
             lowvram_model_memory = 0.1
diff --git a/nodes.py b/nodes.py
index ef51994ef..968f0f9ad 100644
--- a/nodes.py
+++ b/nodes.py
@@ -63,6 +63,8 @@ class CLIPTextEncode(ComfyNodeABC):
     DESCRIPTION = "Encodes a text prompt using a CLIP model into an embedding that can be used to guide the diffusion model towards generating specific images."
 
     def encode(self, clip, text):
+        if clip is None:
+            raise RuntimeError("ERROR: clip input is invalid: None\n\nIf the clip is from a checkpoint loader node your checkpoint does not contain a valid clip or text encoder model.")
         tokens = clip.tokenize(text)
         return (clip.encode_from_tokens_scheduled(tokens), )
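
Note on the model_management.py hunk: the deleted branch previously reset lowvram_model_memory to 0 (a full load) whenever the model fit inside the computed budget, so after this patch the budget is always applied. Below is a minimal standalone sketch of that budget math. The value of MIN_WEIGHT_MEMORY_RATIO and the concrete memory figures are illustrative assumptions, and minimum_inference_memory is passed in as a plain number here instead of calling the module's helper.

# Standalone sketch of the patched weight-memory budget in load_models_gpu.
# MIN_WEIGHT_MEMORY_RATIO = 0.4 is an assumed value for illustration only.
MIN_WEIGHT_MEMORY_RATIO = 0.4
GB = 1024 ** 3

def weight_budget(current_free_mem, minimum_memory_required,
                  minimum_inference_memory, loaded_memory):
    # Largest of: a 64MB floor, free memory minus the caller's reservation,
    # and a ratio-capped share that still leaves room for inference.
    budget = max(64 * 1024 * 1024,
                 current_free_mem - minimum_memory_required,
                 min(current_free_mem * MIN_WEIGHT_MEMORY_RATIO,
                     current_free_mem - minimum_inference_memory))
    # Subtract weights already resident; keep the result strictly positive,
    # since lowvram_model_memory == 0 means "load fully" elsewhere.
    return max(0.1, budget - loaded_memory)

# Example: 8GB free, 1GB reserved by the caller, 1.2GB inference floor,
# nothing loaded yet -> 7GB budget (free minus reservation wins the max()).
print(weight_budget(8 * GB, 1 * GB, 1.2 * GB, 0) / GB)  # 7.0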