ComfyUI/custom_nodes/image_crop.py

import einops
import torchvision.transforms as T
from PIL.Image import Image

import nodes


class ImageCrop:
    """Decode a latent, crop a square region around a centre point, and re-encode it."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "vae": ("VAE",),
                "latent": ("LATENT",),
                "center_x": ("INT", {
                    "default": 0,
                    "min": 0,     # Minimum value
                    "max": 4096,  # Maximum value
                    "step": 16,   # Slider's step
                }),
                "center_y": ("INT", {
                    "default": 0,
                    "min": 0,     # Minimum value
                    "max": 4096,  # Maximum value
                    "step": 16,   # Slider's step
                }),
                "pixelradius": ("INT", {
                    "default": 0,
                    "min": 0,     # Minimum value
                    "max": 4096,  # Maximum value
                    "step": 16,   # Slider's step
                }),
            }
        }

    RETURN_TYPES = ("LATENT", "IMAGE",)
    FUNCTION = "image_crop"
    OUTPUT_NODE = True
    CATEGORY = "inflamously"

    def image_crop(self, vae, latent, center_x, center_y, pixelradius):
        # Decode the latent to a batched [B, H, W, C] image tensor and take the first image.
        tensor_img = vae.decode(latent["samples"])
        stripped_tensor_img = tensor_img[0]
        # ToPILImage expects channels first, so go [H, W, C] -> [C, H, W].
        pil_img: Image = T.ToPILImage()(einops.rearrange(stripped_tensor_img, "h w c -> c h w"))
        # Crop a square of side `pixelradius` centred on (center_x, center_y),
        # using integer halves so the box coordinates stay whole pixels.
        half = pixelradius // 2
        pil_img = pil_img.crop((center_x - half, center_y - half, center_x + half, center_y + half))
        # Back to a batched ComfyUI image tensor: [C, H, W] -> [1, H, W, C].
        new_tensor_img = einops.rearrange(T.ToTensor()(pil_img), "c h w -> 1 h w c")
        # Snap the crop to the VAE's size multiple, then re-encode the RGB channels only.
        pixels = nodes.VAEEncode.vae_encode_crop_pixels(new_tensor_img)
        new_latent = vae.encode(pixels[:, :, :, :3])
        return ({"samples": new_latent}, new_tensor_img)


NODE_CLASS_MAPPINGS = {
    "ImageCrop": ImageCrop,
}
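

# --- Illustrative sanity check (an assumed sketch, not part of the original node) ---
# The crop above is a square of side `pixelradius` centred on (center_x, center_y).
# `_crop_box` is a hypothetical helper that mirrors that arithmetic so it can be
# verified in isolation; note that running this file directly still requires
# ComfyUI's `nodes` module to be importable.

def _crop_box(center_x, center_y, pixelradius):
    half = pixelradius // 2
    return (center_x - half, center_y - half, center_x + half, center_y + half)


if __name__ == "__main__":
    # A 128px square centred on (256, 256) spans pixels 192..320 on both axes.
    assert _crop_box(256, 256, 128) == (192, 192, 320, 320)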