diff --git a/comfy/cmd/server.py b/comfy/cmd/server.py
index a99252ba7..28a047e28 100644
--- a/comfy/cmd/server.py
+++ b/comfy/cmd/server.py
@@ -25,7 +25,7 @@
 from aiohttp import web
 from can_ada import URL, parse as urlparse
 from typing_extensions import NamedTuple
-import comfy.interruption
+from .. import interruption
 from .latent_preview_image_encoding import encode_preview_image
 from .. import model_management
 from .. import utils
@@ -527,7 +527,7 @@ class PromptServer(ExecutorToClientProgress):
 
         @routes.post("/interrupt")
         async def post_interrupt(request):
-            comfy.interruption.interrupt_current_processing()
+            interruption.interrupt_current_processing()
             return web.Response(status=200)
 
         @routes.post("/free")
diff --git a/comfy/sample.py b/comfy/sample.py
index e021385d8..cf8fcb86d 100644
--- a/comfy/sample.py
+++ b/comfy/sample.py
@@ -29,7 +29,7 @@ def prepare_noise(latent_image, seed, noise_inds=None):
 def fix_empty_latent_channels(model, latent_image):
     latent_channels = model.get_model_object("latent_format").latent_channels #Resize the empty latent image so it has the right number of channels
     if latent_channels != latent_image.shape[1] and torch.count_nonzero(latent_image) == 0:
-        latent_image = comfy.utils.repeat_to_batch_size(latent_image, latent_channels, dim=1)
+        latent_image = utils.repeat_to_batch_size(latent_image, latent_channels, dim=1)
     return latent_image
 
 def prepare_sampling(model, noise_shape, positive, negative, noise_mask):