diff --git a/.github/workflows/stable-release.yml b/.github/workflows/stable-release.yml
index f501b7b31..bc64ed74d 100644
--- a/.github/workflows/stable-release.yml
+++ b/.github/workflows/stable-release.yml
@@ -145,6 +145,8 @@ jobs:
         cp -r ComfyUI/.ci/windows_${{ inputs.rel_name }}_base_files/* ./
         cp ../update_comfyui_and_python_dependencies.bat ./update/

+        echo 'local-portable' > ComfyUI/.comfy_environment
+
         cd ..
         "C:\Program Files\7-Zip\7z.exe" a -t7z -m0=lzma2 -mx=9 -mfb=128 -md=768m -ms=on -mf=BCJ2 ComfyUI_windows_portable.7z ComfyUI_windows_portable

diff --git a/comfy/model_patcher.py b/comfy/model_patcher.py
index 7d2d6883f..33bdedfb1 100644
--- a/comfy/model_patcher.py
+++ b/comfy/model_patcher.py
@@ -26,6 +26,7 @@ import uuid
 from typing import Callable, Optional

 import torch
+import tqdm

 import comfy.float
 import comfy.hooks
@@ -1651,7 +1652,13 @@ class ModelPatcherDynamic(ModelPatcher):
                 self.model.model_loaded_weight_memory += casted_buf.numel() * casted_buf.element_size()

         force_load_stat = f" Force pre-loaded {len(self.backup)} weights: {self.model.model_loaded_weight_memory // 1024} KB." if len(self.backup) > 0 else ""
-        logging.info(f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}")
+        # Demote repeated identical prepare-messages to DEBUG while a tqdm progress
+        # bar is active, so the sampling loop doesn't spam INFO with duplicates.
+        log_key = (self.patches_uuid, allocated_size, num_patches, len(self.backup), self.model.model_loaded_weight_memory)
+        # NOTE: tqdm's private _instances set detects an active progress loop; getattr guards against tqdm API changes.
+        in_loop = bool(getattr(tqdm.tqdm, "_instances", None))
+        level = logging.DEBUG if in_loop and getattr(self, "_last_prepare_log_key", None) == log_key else logging.INFO
+        self._last_prepare_log_key = log_key
+        logging.log(level, f"Model {self.model.__class__.__name__} prepared for dynamic VRAM loading. {allocated_size // (1024 ** 2)}MB Staged. {num_patches} patches attached.{force_load_stat}")
         self.model.device = device_to
         self.model.current_weight_patches_uuid = self.patches_uuid

diff --git a/server.py b/server.py
index 182f08576..25bf315de 100644
--- a/server.py
+++ b/server.py
@@ -561,7 +561,7 @@ class PromptServer():
                     buffer.seek(0)

                     return web.Response(body=buffer.read(), content_type=f'image/{image_format}',
-                                        headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
+                                        headers={"Content-Disposition": f"inline; filename=\"{filename}\""})

                 if 'channel' not in request.rel_url.query:
                     channel = 'rgba'
@@ -581,7 +581,7 @@ class PromptServer():
                         buffer.seek(0)

                         return web.Response(body=buffer.read(), content_type='image/png',
-                                            headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
+                                            headers={"Content-Disposition": f"inline; filename=\"{filename}\""})

                     elif channel == 'a':
                         with Image.open(file) as img:
@@ -598,7 +598,7 @@ class PromptServer():
                         alpha_buffer.seek(0)

                         return web.Response(body=alpha_buffer.read(), content_type='image/png',
-                                            headers={"Content-Disposition": f"attachment; filename=\"{filename}\""})
+                                            headers={"Content-Disposition": f"inline; filename=\"{filename}\""})
                     else:
                         # Use the content type from asset resolution if available,
                         # otherwise guess from the filename.
@@ -615,7 +615,7 @@ class PromptServer():
                         return web.FileResponse(
                             file,
                             headers={
-                                "Content-Disposition": f"attachment; filename=\"{filename}\"",
+                                "Content-Disposition": f"inline; filename=\"{filename}\"",
                                 "Content-Type": content_type
                             }
                         )