Changed ResizeImage to use a min dimension size. Added a TextLoader node and an example negative prompt.
parent 29bc094880
commit 3b0e4d7305
@@ -7,7 +7,7 @@ class ResizeImage:
     def INPUT_TYPES(s):
         return {"required":
                     {"image": ("IMAGE",),
-                     "max_dimension_size": ("INT", {"default": 512, "min": 0, "max": 4096, "step": 64}),
+                     "min_dimension_size": ("INT", {"default": 512, "min": 0, "max": 4096, "step": 64}),
                     }
                 }
@@ -16,24 +16,18 @@ class ResizeImage:
     RETURN_TYPES = ("IMAGE",)
     FUNCTION = "resize_image"

-    def resize_image(self, image, max_dimension_size):
+    def resize_image(self, image, min_dimension_size):
         _, height, width, _ = image.shape

         # Calculate the new dimensions while maintaining the aspect ratio
-        if height > width:
-            new_height = max_dimension_size
-            new_width = int(width * (max_dimension_size / height))
+        if height < width:
+            new_height = min_dimension_size
+            new_width = int(width * (min_dimension_size / height))
         else:
-            new_width = max_dimension_size
-            new_height = int(height * (max_dimension_size / width))
+            new_width = min_dimension_size
+            new_height = int(height * (min_dimension_size / width))

         # Rearrange the image tensor to (1, 3, height, width) format
         image = image.permute(0, 3, 1, 2)

         # Resize the image using F.interpolate
         resized_image = F.interpolate(image, size=(new_height, new_width), mode='bilinear', align_corners=False)

         # Rearrange the resized image tensor back to (1, height, width, 3) format
         resized_image = resized_image.permute(0, 2, 3, 1)

         return (resized_image,)
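For reference, a quick standalone check of the new min-dimension math (a sketch, not part of the commit; the 384×768 input is hypothetical): the shorter side is scaled to exactly min_dimension_size and the longer side follows the aspect ratio.

import torch
import torch.nn.functional as F

image = torch.rand(1, 384, 768, 3)  # hypothetical (batch, height, width, channels) tensor
min_dimension_size = 512
_, height, width, _ = image.shape

if height < width:
    new_height = min_dimension_size
    new_width = int(width * (min_dimension_size / height))
else:
    new_width = min_dimension_size
    new_height = int(height * (min_dimension_size / width))

resized = F.interpolate(image.permute(0, 3, 1, 2), size=(new_height, new_width),
                        mode='bilinear', align_corners=False).permute(0, 2, 3, 1)
print(resized.shape)  # torch.Size([1, 512, 1024, 3]) -- the short side is now 512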
@@ -1,9 +1,50 @@
+import hashlib
+import os
+
 import numpy as np

+import folder_paths
+
+
+class TextLoader:
+    def __init__(self, event_dispatcher):
+        self.event_dispatcher = event_dispatcher
+
+    @classmethod
+    def INPUT_TYPES(s):
+        prompts_dir = folder_paths.prompt_directory
+        return {"required":
+                    {"prompt_file": (sorted(os.listdir(prompts_dir)),)},
+                }
+
+    CATEGORY = "utils"
+
+    RETURN_TYPES = ("TEXT",)
+    FUNCTION = "load_text"
+
+    def load_text(self, prompt_file):
+        text_file_path = os.path.join(folder_paths.prompt_directory, prompt_file)
+        with open(text_file_path, 'r') as f:
+            text = f.read()
+        return (text,)
+
+    @classmethod
+    def IS_CHANGED(s, prompt_file):
+        text_file_path = os.path.join(folder_paths.prompt_directory, prompt_file)
+        m = hashlib.sha256()
+        with open(text_file_path, 'rb') as f:
+            m.update(f.read())
+        return m.digest().hex()
+
+    @classmethod
+    def VALIDATE_INPUTS(s, prompt_file):
+        text_file_path = os.path.join(folder_paths.prompt_directory, prompt_file)
+        if not os.path.exists(text_file_path):
+            return "Invalid text file: {}".format(text_file_path)
+
+        return True
+
 class PrintNode:

     def __init__(self, event_dispatcher):
         self.event_dispatcher = event_dispatcher
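ComfyUI re-executes a node whenever IS_CHANGED returns a different value than on the previous run, so hashing the file bytes means edits to the prompt file invalidate the cached text even though the prompt_file widget value is unchanged. A minimal sketch of the same fingerprinting, with a hypothetical helper name:

import hashlib

def file_fingerprint(path):
    # Stable content hash: the value changes only when the file's bytes change.
    m = hashlib.sha256()
    with open(path, 'rb') as f:
        m.update(f.read())
    return m.digest().hex()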
@@ -33,33 +74,40 @@ class PrintNode:
         print(f"Latent hash: {latent_hash}")
         print(np.array2string(latent["samples"].cpu().numpy(), separator=', '))

+        output_text = []
+
         # attention[a][b][c][d]
         # a: number of steps/sigma in this diffusion process
         # b: number of SpatialTransformer or AttentionBlocks used in the middle blocks of the latent diffusion model
         # c: number of transformer layers in the SpatialTransformer or AttentionBlocks
         # d: attn1, attn2
         if attention is not None:
-            print(f'attention has {len(attention)} steps')
-            print(f'each step has {len(attention[0])} transformer blocks')
-            print(f'each block has {len(attention[0][0])} transformer layers')
-            print(f'each transformer layer has {len(attention[0][0][0])} attention tensors (attn1, attn2)')
-            print(f'the shape of the attention tensors is {attention[0][0][0][0].shape}')
-            print(f'the first value of the first attention tensor is {attention[0][0][0][0][:1]}')
+            output_text.append(f'attention has {len(attention)} steps\n')
+            output_text[-1] += f'each step has {len(attention[0])} transformer blocks\n'
+            output_text[-1] += f'each block has {len(attention[0][0])} transformer layers\n'
+            output_text[-1] += f'each transformer layer has {len(attention[0][0][0])} attention tensors (attn1, attn2)\n'
+            output_text[-1] += f'the shape of the attention tensors is {attention[0][0][0][0].shape}\n'
+            output_text[-1] += f'the first value of the first attention tensor is {attention[0][0][0][0][:1]}\n'
+            print(output_text[-1])

         if text is not None:
+            output_text.append(text)
             print(text)

         if image is not None:
             _, height, width, _ = image.shape
-            print(f"Image dimensions: {width}x{height}")
+            output_text.append(f"Image dimensions: {width}x{height}\n")
+            print(output_text[-1])

-        return {"ui": {"": text}}
+        return {"ui": {"text": "\n".join(output_text)}}

 NODE_CLASS_MAPPINGS = {
     "PrintNode": PrintNode,
+    "TextLoader": TextLoader,
 }

 NODE_DISPLAY_NAME_MAPPINGS = {
     "PrintNode": "Print",
+    "TextLoader": "Text Loader",
 }
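Given the a/b/c/d layout documented in the comments above (steps -> blocks -> layers -> attn1/attn2), a hedged sketch of walking the whole structure; `attention` is assumed to be the same nested list the node receives:

for s, step in enumerate(attention):        # a: diffusion steps
    for b, block in enumerate(step):        # b: transformer blocks
        for l, layer in enumerate(block):   # c: layers within the block
            attn1, attn2 = layer            # d: the two attention tensors
            print(f"step {s}, block {b}, layer {l}: {attn1.shape}, {attn2.shape}")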
@@ -37,6 +37,7 @@ folder_names_and_paths["hypernetworks"] = ([os.path.join(models_dir, "hypernetwo
 output_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output")
 temp_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "temp")
 input_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "input")
+prompt_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "prompts")

 if not os.path.exists(input_directory):
     os.makedirs(input_directory)
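Unlike input_directory just below it, prompt_directory is never created on startup; the commit relies on the prompts directory shipping with the repo (it adds prompts/negative_prompts.txt). A defensive variant, an assumption rather than part of this commit, would mirror the input_directory handling:

if not os.path.exists(prompt_directory):  # illustrative only; not in this commit
    os.makedirs(prompt_directory)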
nodes.py (12 changed lines)
@@ -42,14 +42,20 @@ class CLIPTextEncode:

     @classmethod
     def INPUT_TYPES(s):
-        return {"required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )}}
+        return {
+            "required": {"text": ("STRING", {"multiline": True}), "clip": ("CLIP", )},
+            "optional": {"external_text": ("TEXT",)}
+        }
     RETURN_TYPES = ("CONDITIONING",)
     FUNCTION = "encode"

     CATEGORY = "conditioning"

-    def encode(self, clip, text):
-        return ([[clip.encode(text), {}]], )
+    def encode(self, clip, text, external_text=None):
+        if external_text is not None:
+            return ([[clip.encode(external_text), {}]], )
+        else:
+            return ([[clip.encode(text), {}]], )

 class ConditioningCombine:
     def __init__(self, event_dispatcher):
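Note the optional input's type is written "TEXT" here to match TextLoader's RETURN_TYPES, since ComfyUI matches connection types by exact string. The new encode() gives a connected external_text precedence over the node's own multiline text widget; the selection reduces to a one-liner (illustrative only, not how the commit writes it):

chosen_text = external_text if external_text is not None else text
conditioning = ([[clip.encode(chosen_text), {}]], )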
prompts/negative_prompts.txt (new file, 1 line)
@@ -0,0 +1 @@
+bad_prompt_version2, censorship, ugly, old, deformed, amateur drawing, odd, fat, anime, cell shading, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, logo, blurry, out of focus, cell shading, anime, watercolor, desaturated, ((nude))