mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-18 18:30:19 +08:00
Adds crop-to-multiple mode to ResizeImageMaskNode. (#11838)
* Add crop-to-multiple resize mode * Make scale-to-multiple shape handling explicit
This commit is contained in:
parent
eff2b9d412
commit
469dd9c16a
@ -254,6 +254,7 @@ class ResizeType(str, Enum):
|
|||||||
SCALE_HEIGHT = "scale height"
|
SCALE_HEIGHT = "scale height"
|
||||||
SCALE_TOTAL_PIXELS = "scale total pixels"
|
SCALE_TOTAL_PIXELS = "scale total pixels"
|
||||||
MATCH_SIZE = "match size"
|
MATCH_SIZE = "match size"
|
||||||
|
SCALE_TO_MULTIPLE = "scale to multiple"
|
||||||
|
|
||||||
def is_image(input: torch.Tensor) -> bool:
|
def is_image(input: torch.Tensor) -> bool:
|
||||||
# images have 4 dimensions: [batch, height, width, channels]
|
# images have 4 dimensions: [batch, height, width, channels]
|
||||||
@ -363,6 +364,43 @@ def scale_match_size(input: torch.Tensor, match: torch.Tensor, scale_method: str
|
|||||||
input = finalize_image_mask_input(input, is_type_image)
|
input = finalize_image_mask_input(input, is_type_image)
|
||||||
return input
|
return input
|
||||||
|
|
||||||
|
def scale_to_multiple_cover(input: torch.Tensor, multiple: int, scale_method: str) -> torch.Tensor:
    """Scale-and-crop so both spatial dimensions become multiples of ``multiple``.

    Uses "cover" semantics: each dimension's target is its size rounded down
    to the nearest multiple; the tensor is scaled uniformly just enough to
    cover both targets, then the overshoot along the looser axis is cropped
    away, centered. Returns ``input`` untouched when ``multiple`` <= 1, when
    a dimension is smaller than one multiple, or when both dimensions are
    already aligned.
    """
    if multiple <= 1:
        return input

    is_type_image = is_image(input)
    # Images are [batch, height, width, channels]; masks are [batch, height, width].
    if is_type_image:
        _, height, width, _ = input.shape
    else:
        _, height, width = input.shape

    # Round each dimension down to the nearest multiple.
    target_width = (width // multiple) * multiple
    target_height = (height // multiple) * multiple
    if target_width == 0 or target_height == 0:
        # A dimension is smaller than one multiple — nothing sensible to produce.
        return input
    if target_width == width and target_height == height:
        # Already aligned; skip the resample entirely.
        return input

    ratio_w = target_width / width
    ratio_h = target_height / height
    # Scale by the larger of the two ratios so both axes cover their targets;
    # the looser axis gets ceil'd, and max() guards against float rounding
    # ever leaving it short of its target.
    if ratio_w >= ratio_h:
        resized_width = target_width
        resized_height = max(target_height, int(math.ceil(height * ratio_w)))
    else:
        resized_height = target_height
        resized_width = max(target_width, int(math.ceil(width * ratio_h)))

    input = init_image_mask_input(input, is_type_image)
    input = comfy.utils.common_upscale(input, resized_width, resized_height, scale_method, "disabled")
    input = finalize_image_mask_input(input, is_type_image)

    # Center-crop the overshoot down to the exact multiple-aligned targets.
    left = (resized_width - target_width) // 2
    top = (resized_height - target_height) // 2
    if is_type_image:
        return input[:, top:top + target_height, left:left + target_width, :]
    return input[:, top:top + target_height, left:left + target_width]
|
||||||
|
|
||||||
class ResizeImageMaskNode(io.ComfyNode):
|
class ResizeImageMaskNode(io.ComfyNode):
|
||||||
|
|
||||||
scale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
|
scale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
|
||||||
@ -378,6 +416,7 @@ class ResizeImageMaskNode(io.ComfyNode):
|
|||||||
longer_size: int
|
longer_size: int
|
||||||
shorter_size: int
|
shorter_size: int
|
||||||
megapixels: float
|
megapixels: float
|
||||||
|
multiple: int
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def define_schema(cls):
|
def define_schema(cls):
|
||||||
@ -417,6 +456,9 @@ class ResizeImageMaskNode(io.ComfyNode):
|
|||||||
io.MultiType.Input("match", [io.Image, io.Mask]),
|
io.MultiType.Input("match", [io.Image, io.Mask]),
|
||||||
crop_combo,
|
crop_combo,
|
||||||
]),
|
]),
|
||||||
|
io.DynamicCombo.Option(ResizeType.SCALE_TO_MULTIPLE, [
|
||||||
|
io.Int.Input("multiple", default=8, min=1, max=MAX_RESOLUTION, step=1),
|
||||||
|
]),
|
||||||
]),
|
]),
|
||||||
io.Combo.Input("scale_method", options=cls.scale_methods, default="area"),
|
io.Combo.Input("scale_method", options=cls.scale_methods, default="area"),
|
||||||
],
|
],
|
||||||
@ -442,6 +484,8 @@ class ResizeImageMaskNode(io.ComfyNode):
|
|||||||
return io.NodeOutput(scale_total_pixels(input, resize_type["megapixels"], scale_method))
|
return io.NodeOutput(scale_total_pixels(input, resize_type["megapixels"], scale_method))
|
||||||
elif selected_type == ResizeType.MATCH_SIZE:
|
elif selected_type == ResizeType.MATCH_SIZE:
|
||||||
return io.NodeOutput(scale_match_size(input, resize_type["match"], scale_method, resize_type["crop"]))
|
return io.NodeOutput(scale_match_size(input, resize_type["match"], scale_method, resize_type["crop"]))
|
||||||
|
elif selected_type == ResizeType.SCALE_TO_MULTIPLE:
|
||||||
|
return io.NodeOutput(scale_to_multiple_cover(input, resize_type["multiple"], scale_method))
|
||||||
raise ValueError(f"Unsupported resize type: {selected_type}")
|
raise ValueError(f"Unsupported resize type: {selected_type}")
|
||||||
|
|
||||||
def batch_images(images: list[torch.Tensor]) -> torch.Tensor | None:
|
def batch_images(images: list[torch.Tensor]) -> torch.Tensor | None:
|
||||||
|
|||||||
Loading…
Reference in New Issue
Block a user