diff --git a/comfy/model_base.py b/comfy/model_base.py
index cb7689e84..75ec42699 100644
--- a/comfy/model_base.py
+++ b/comfy/model_base.py
@@ -1024,6 +1024,8 @@ class CosmosPredict2(BaseModel):
     def process_timestep(self, timestep, x, denoise_mask=None, **kwargs):
         if denoise_mask is None:
             return timestep
+        if denoise_mask.ndim <= 4:
+            return timestep
         condition_video_mask_B_1_T_1_1 = denoise_mask.mean(dim=[1, 3, 4], keepdim=True)
         c_noise_B_1_T_1_1 = 0.0 * (1.0 - condition_video_mask_B_1_T_1_1) + timestep.reshape(timestep.shape[0], 1, 1, 1, 1) * condition_video_mask_B_1_T_1_1
         out = c_noise_B_1_T_1_1.squeeze(dim=[1, 3, 4])
diff --git a/main.py b/main.py
index a0e2f578f..cd6980bdd 100644
--- a/main.py
+++ b/main.py
@@ -197,7 +197,13 @@ def prompt_worker(q, server_instance):
 
         current_time = time.perf_counter()
         execution_time = current_time - execution_start_time
-        logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
+
+        # After 10 minutes, log the elapsed time as HH:MM:SS for readability.
+        # NOTE(review): gmtime-based formatting wraps for durations >= 24h — acceptable here; confirm.
+        if execution_time > 600:
+            readable_time = time.strftime("%H:%M:%S", time.gmtime(execution_time))
+            logging.info(f"Prompt executed in {readable_time}")
+        else:
+            logging.info("Prompt executed in {:.2f} seconds".format(execution_time))
 
         flags = q.get_flags()
         free_memory = flags.get("free_memory", False)