diff --git a/comfy/model_management.py b/comfy/model_management.py
index c89f7a246..3e58e7dd9 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -639,6 +639,8 @@ class LoadedModel:
         return True
 
     def model_use_more_vram(self, extra_memory, force_patch_weights=False):
+        if self.device != self.model.load_device:
+            logging.error(f"LoadedModel device mismatch: self.device={self.device}, model.load_device={self.model.load_device}, model_class={self.model.model.__class__.__name__}, is_multigpu={getattr(self.model, 'is_multigpu_base_clone', False)}, id(model)={id(self.model)}")
         return self.model.partially_load(self.device, extra_memory, force_patch_weights=force_patch_weights)
 
     def __eq__(self, other):
diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index c3ecc276f..a3872926d 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -1646,6 +1646,8 @@ class ModelPatcherDynamic(ModelPatcher):
             #now.
             assert not full_load
 
+        if device_to != self.load_device:
+            logging.error(f"ModelPatcherDynamic.load device mismatch: device_to={device_to}, self.load_device={self.load_device}, model_class={self.model.__class__.__name__}, is_multigpu_base_clone={getattr(self, 'is_multigpu_base_clone', False)}, id(self)={id(self)}")
         assert device_to == self.load_device
         num_patches = 0
 