Merge branch 'comfyanonymous:master' into feature/toggle_migration

Dr.Lt.Data 2023-09-15 12:53:45 +09:00, committed by GitHub
commit 7247cb6c6e (no known signing key found in database; GPG Key ID: 4AEE18F83AFDEB23)
23 changed files with 219 additions and 82 deletions

.gitignore

@@ -1,16 +1,16 @@
 __pycache__/
 *.py[cod]
-output/
-input/
-!input/example.png
-models/
-temp/
-custom_nodes/
+/output/
+/input/
+!/input/example.png
+/models/
+/temp/
+/custom_nodes/
 !custom_nodes/example_node.py.example
 extra_model_paths.yaml
 /.vs
 .idea/
 venv/
-web/extensions/*
-!web/extensions/logging.js.example
-!web/extensions/core/
+/web/extensions/*
+!/web/extensions/logging.js.example
+!/web/extensions/core/

README.md

@@ -77,9 +77,9 @@ Simply download, extract with [7-Zip](https://7-zip.org) and run. Make sure you
 See the [Config file](extra_model_paths.yaml.example) to set the search paths for models. In the standalone windows build you can find this file in the ComfyUI directory. Rename this file to extra_model_paths.yaml and edit it with your favorite text editor.
 
-## Colab Notebook
+## Jupyter Notebook
 
-To run it on colab or paperspace you can use my [Colab Notebook](notebooks/comfyui_colab.ipynb) here: [Link to open with google colab](https://colab.research.google.com/github/comfyanonymous/ComfyUI/blob/master/notebooks/comfyui_colab.ipynb)
+To run it on services like paperspace, kaggle or colab you can use my [Jupyter Notebook](notebooks/comfyui_colab.ipynb)
 
 ## Manual Install (Windows, Linux)

comfy/cli_args.py

@@ -1,6 +1,6 @@
 import argparse
 import enum
-
+import comfy.options
 
 class EnumAction(argparse.Action):
     """
@@ -94,7 +94,10 @@ parser.add_argument("--windows-standalone-build", action="store_true", help="Win
 parser.add_argument("--disable-metadata", action="store_true", help="Disable saving prompt metadata in files.")
 
-args = parser.parse_args()
+if comfy.options.args_parsing:
+    args = parser.parse_args()
+else:
+    args = parser.parse_args([])
 
 if args.windows_standalone_build:
     args.auto_launch = True

comfy/clip_vision.py

@@ -49,12 +49,17 @@ class ClipVisionModel():
             precision_scope = lambda a, b: contextlib.nullcontext(a)
 
         with precision_scope(comfy.model_management.get_autocast_device(self.load_device), torch.float32):
-            outputs = self.model(pixel_values=pixel_values)
+            outputs = self.model(pixel_values=pixel_values, output_hidden_states=True)
 
         for k in outputs:
             t = outputs[k]
             if t is not None:
-                outputs[k] = t.cpu()
+                if k == 'hidden_states':
+                    outputs["penultimate_hidden_states"] = t[-2].cpu()
+                    outputs["hidden_states"] = None
+                else:
+                    outputs[k] = t.cpu()
         return outputs
 
 def convert_to_transformers(sd, prefix):
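
The [-2] index relies on how transformers reports hidden states: with output_hidden_states=True the model returns a tuple holding the embedding output followed by each encoder layer's output, so [-1] is the final layer and [-2] the penultimate one. A minimal sketch of that indexing, assuming a ViT-L/14-sized vision tower:

    import torch

    num_layers = 24  # assumed depth of the vision transformer
    # embeddings output plus one tensor per encoder layer, as transformers returns it
    hidden_states = tuple(torch.zeros(1, 257, 1024) for _ in range(num_layers + 1))

    penultimate = hidden_states[-2]
    print(len(hidden_states), penultimate.shape)  # 25 torch.Size([1, 257, 1024])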

comfy/controlnet.py

@@ -449,10 +449,18 @@ class T2IAdapter(ControlBase):
         return c
 
 def load_t2i_adapter(t2i_data):
-    keys = t2i_data.keys()
-    if 'adapter' in keys:
+    if 'adapter' in t2i_data:
         t2i_data = t2i_data['adapter']
-        keys = t2i_data.keys()
+    if 'adapter.body.0.resnets.0.block1.weight' in t2i_data: #diffusers format
+        prefix_replace = {}
+        for i in range(4):
+            for j in range(2):
+                prefix_replace["adapter.body.{}.resnets.{}.".format(i, j)] = "body.{}.".format(i * 2 + j)
+            prefix_replace["adapter.body.{}.".format(i, j)] = "body.{}.".format(i * 2)
+        prefix_replace["adapter."] = ""
+        t2i_data = comfy.utils.state_dict_prefix_replace(t2i_data, prefix_replace)
+    keys = t2i_data.keys()
     if "body.0.in_conv.weight" in keys:
         cin = t2i_data['body.0.in_conv.weight'].shape[1]
         model_ad = comfy.t2i_adapter.adapter.Adapter_light(cin=cin, channels=[320, 640, 1280, 1280], nums_rb=4)
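
For reference, a minimal stand-in for comfy.utils.state_dict_prefix_replace (assumed behavior: a matching prefix is swapped for its replacement, other keys pass through) showing what the remap above does to diffusers-style adapter keys:

    def prefix_replace(sd, replace):
        out = {}
        for k, v in sd.items():
            new_k = k
            # try longer prefixes first so a specific "adapter.body..." rule
            # wins over the catch-all "adapter." rule
            for old, new in sorted(replace.items(), key=lambda kv: len(kv[0]), reverse=True):
                if k.startswith(old):
                    new_k = new + k[len(old):]
                    break
            out[new_k] = v
        return out

    sd = {"adapter.body.0.resnets.1.block1.weight": 0, "adapter.conv_in.weight": 1}
    print(prefix_replace(sd, {"adapter.body.0.resnets.1.": "body.1.", "adapter.": ""}))
    # {'body.1.block1.weight': 0, 'conv_in.weight': 1}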

comfy/model_management.py

@@ -451,6 +451,8 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
+        if is_intel_xpu():
+            return torch.device("cpu")
         if should_use_fp16(prioritize_performance=False):
             return get_torch_device()
         else:

comfy/options.py (new file)

@@ -0,0 +1,6 @@
+args_parsing = False
+
+def enable_args_parsing(enable=True):
+    global args_parsing
+    args_parsing = enable
+
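
The intended usage, mirrored by the main.py hunk further down: an application entry point turns parsing on before comfy.cli_args is imported, while code embedding ComfyUI as a library leaves it off so the host program's argv is never consumed. A minimal sketch:

    import comfy.options
    comfy.options.enable_args_parsing()  # running ComfyUI as the application

    # Because the flag is now True, comfy.cli_args parses sys.argv on import;
    # with the flag left False (the library case) it falls back to parse_args([]).
    import comfy.cli_args
    print(comfy.cli_args.args.disable_metadata)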

comfy/samplers.py

@@ -263,8 +263,6 @@ def sampling_function(model_function, x, timestep, uncond, cond, cond_scale, con
             output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
             del input_x
 
-            model_management.throw_exception_if_processing_interrupted()
-
             for o in range(batch_chunks):
                 if cond_or_uncond[o] == COND:
                     out_cond[:,:,area[o][2]:area[o][0] + area[o][2],area[o][3]:area[o][1] + area[o][3]] += output[o] * mult[o]
@@ -390,11 +388,20 @@ def get_mask_aabb(masks):
     return bounding_boxes, is_empty
 
-def resolve_cond_masks(conditions, h, w, device):
+def resolve_areas_and_cond_masks(conditions, h, w, device):
+    # We need to decide on an area outside the sampling loop in order to properly generate opposite areas of equal sizes.
     # While we're doing this, we can also resolve the mask device and scaling for performance reasons
     for i in range(len(conditions)):
         c = conditions[i]
+        if 'area' in c[1]:
+            area = c[1]['area']
+            if area[0] == "percentage":
+                modified = c[1].copy()
+                area = (max(1, round(area[1] * h)), max(1, round(area[2] * w)), round(area[3] * h), round(area[4] * w))
+                modified['area'] = area
+                c = [c[0], modified]
+                conditions[i] = c
+
         if 'mask' in c[1]:
             mask = c[1]['mask']
             mask = mask.to(device=device)
@@ -622,8 +629,8 @@ class KSampler:
         positive = positive[:]
         negative = negative[:]
 
-        resolve_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
-        resolve_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(positive, noise.shape[2], noise.shape[3], self.device)
+        resolve_areas_and_cond_masks(negative, noise.shape[2], noise.shape[3], self.device)
 
         calculate_start_end_timesteps(self.model_wrap, negative)
         calculate_start_end_timesteps(self.model_wrap, positive)
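
A worked example of the percentage resolution above, assuming h and w are latent dimensions and the ("percentage", height, width, y, x) tuple layout written by the ConditioningSetAreaPercentage node in nodes.py below:

    h, w = 64, 64                               # e.g. a 512x512 image in 8x-downscaled latent space
    area = ("percentage", 0.5, 0.25, 0.1, 0.0)  # 50% tall, 25% wide, shifted 10% down
    resolved = (max(1, round(area[1] * h)),     # height -> 32
                max(1, round(area[2] * w)),     # width  -> 16
                round(area[3] * h),             # y      -> 6
                round(area[4] * w))             # x      -> 0
    print(resolved)                             # (32, 16, 6, 0)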

comfy/sd.py

@@ -454,20 +454,26 @@ def load_unet(unet_path): #load unet in diffusers format
     sd = comfy.utils.load_torch_file(unet_path)
     parameters = comfy.utils.calculate_parameters(sd)
     fp16 = model_management.should_use_fp16(model_params=parameters)
 
-    model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
-    if model_config is None:
-        print("ERROR UNSUPPORTED UNET", unet_path)
-        return None
-
-    diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config)
-
-    new_sd = {}
-    for k in diffusers_keys:
-        if k in sd:
-            new_sd[diffusers_keys[k]] = sd.pop(k)
-        else:
-            print(diffusers_keys[k], k)
+    if "input_blocks.0.0.weight" in sd: #ldm
+        model_config = model_detection.model_config_from_unet(sd, "", fp16)
+        if model_config is None:
+            raise RuntimeError("ERROR: Could not detect model type of: {}".format(unet_path))
+        new_sd = sd
+    else: #diffusers
+        model_config = model_detection.model_config_from_diffusers_unet(sd, fp16)
+        if model_config is None:
+            print("ERROR UNSUPPORTED UNET", unet_path)
+            return None
+
+        diffusers_keys = comfy.utils.unet_to_diffusers(model_config.unet_config)
+
+        new_sd = {}
+        for k in diffusers_keys:
+            if k in sd:
+                new_sd[diffusers_keys[k]] = sd.pop(k)
+            else:
+                print(diffusers_keys[k], k)
     offload_device = model_management.unet_offload_device()
     model = model_config.get_model(new_sd, "")
     model = model.to(offload_device)
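
Illustrative key names only (hypothetical minimal state dicts): the branch distinguishes the two layouts purely by a marker key, since LDM-style UNets use input_blocks.* names while diffusers UNets use down_blocks.* names that must be remapped through unet_to_diffusers:

    ldm_sd = {"input_blocks.0.0.weight": None, "middle_block.0.in_layers.0.weight": None}
    diffusers_sd = {"conv_in.weight": None, "down_blocks.0.resnets.0.norm1.weight": None}

    def looks_like_ldm(sd):
        return "input_blocks.0.0.weight" in sd  # same marker key load_unet checks

    print(looks_like_ldm(ldm_sd), looks_like_ldm(diffusers_sd))  # True False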

comfy/sd1_clip.py

@@ -60,6 +60,9 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
         if dtype is not None:
             self.transformer.to(dtype)
             self.transformer.text_model.embeddings.token_embedding.to(torch.float32)
+            self.transformer.text_model.embeddings.position_embedding.to(torch.float32)
 
         self.max_length = max_length
         if freeze:
             self.freeze()
@@ -138,7 +141,7 @@ class SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):
         tokens = self.set_up_textual_embeddings(tokens, backup_embeds)
         tokens = torch.LongTensor(tokens).to(device)
 
-        if backup_embeds.weight.dtype != torch.float32:
+        if self.transformer.text_model.final_layer_norm.weight.dtype != torch.float32:
             precision_scope = torch.autocast
         else:
             precision_scope = lambda a, b: contextlib.nullcontext(a)

comfy/sd2_clip.py

@@ -12,16 +12,6 @@ class SD2ClipModel(sd1_clip.SD1ClipModel):
         super().__init__(device=device, freeze=freeze, layer=layer, layer_idx=layer_idx, textmodel_json_config=textmodel_json_config, textmodel_path=textmodel_path, dtype=dtype)
         self.empty_tokens = [[49406] + [49407] + [0] * 75]
 
-    def clip_layer(self, layer_idx):
-        if layer_idx < 0:
-            layer_idx -= 1 #The real last layer of SD2.x clip is the penultimate one. The last one might contain garbage.
-        if abs(layer_idx) >= 24:
-            self.layer = "hidden"
-            self.layer_idx = -2
-        else:
-            self.layer = "hidden"
-            self.layer_idx = layer_idx
-
 class SD2Tokenizer(sd1_clip.SD1Tokenizer):
     def __init__(self, tokenizer_path=None, embedding_directory=None):
         super().__init__(tokenizer_path, pad_with_end=False, embedding_directory=embedding_directory, embedding_size=1024)

comfy_extras/chainner_models/architecture/SwinIR.py

@@ -846,6 +846,7 @@ class SwinIR(nn.Module):
         num_in_ch = in_chans
         num_out_ch = in_chans
         supports_fp16 = True
+        self.start_unshuffle = 1
 
         self.model_arch = "SwinIR"
         self.sub_type = "SR"
@@ -874,6 +875,11 @@ class SwinIR(nn.Module):
             else 64
         )
 
+        if "conv_first.1.weight" in self.state:
+            self.state["conv_first.weight"] = self.state.pop("conv_first.1.weight")
+            self.state["conv_first.bias"] = self.state.pop("conv_first.1.bias")
+            self.start_unshuffle = round(math.sqrt(self.state["conv_first.weight"].shape[1] // 3))
+
         num_in_ch = self.state["conv_first.weight"].shape[1]
         in_chans = num_in_ch
         if "conv_last.weight" in state_keys:
@@ -968,7 +974,7 @@ class SwinIR(nn.Module):
         self.depths = depths
         self.window_size = window_size
         self.mlp_ratio = mlp_ratio
-        self.scale = upscale
+        self.scale = upscale / self.start_unshuffle
         self.upsampler = upsampler
         self.img_size = img_size
         self.img_range = img_range
@@ -1101,6 +1107,9 @@ class SwinIR(nn.Module):
             self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             if self.upscale == 4:
                 self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+            elif self.upscale == 8:
+                self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+                self.conv_up3 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
             self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
             self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
@@ -1157,6 +1166,9 @@ class SwinIR(nn.Module):
         self.mean = self.mean.type_as(x)
         x = (x - self.mean) * self.img_range
 
+        if self.start_unshuffle > 1:
+            x = torch.nn.functional.pixel_unshuffle(x, self.start_unshuffle)
+
         if self.upsampler == "pixelshuffle":
             # for classical SR
             x = self.conv_first(x)
@@ -1186,6 +1198,9 @@ class SwinIR(nn.Module):
                         )
                     )
                 )
+            elif self.upscale == 8:
+                x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+                x = self.lrelu(self.conv_up3(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
             x = self.conv_last(self.lrelu(self.conv_hr(x)))
         else:
             # for image denoising and JPEG compression artifact reduction
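
A minimal shape sketch of why the effective scale is divided by start_unshuffle: pixel_unshuffle trades spatial resolution for channels before the network runs, so a checkpoint whose conv_first expects 3*r*r input channels only enlarges the original image by upscale / r overall:

    import torch
    import torch.nn.functional as F

    x = torch.zeros(1, 3, 64, 64)
    r = 2  # start_unshuffle, inferred above from conv_first.weight.shape[1] // 3
    print(F.pixel_unshuffle(x, r).shape)  # torch.Size([1, 12, 32, 32])
    # an internal 8x upsampler on this input yields 8 / r = 4x the original size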

comfy_extras/nodes_model_merging.py

@@ -27,6 +27,44 @@ class ModelMergeSimple:
             m.add_patches({k: kp[k]}, 1.0 - ratio, ratio)
         return (m, )
 
+class ModelSubtract:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model1": ("MODEL",),
+                              "model2": ("MODEL",),
+                              "multiplier": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.01}),
+                              }}
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "merge"
+
+    CATEGORY = "_for_testing/model_merging"
+
+    def merge(self, model1, model2, multiplier):
+        m = model1.clone()
+        kp = model2.get_key_patches("diffusion_model.")
+        for k in kp:
+            m.add_patches({k: kp[k]}, - multiplier, multiplier)
+        return (m, )
+
+
+class ModelAdd:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "model1": ("MODEL",),
+                              "model2": ("MODEL",),
+                              }}
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "merge"
+
+    CATEGORY = "_for_testing/model_merging"
+
+    def merge(self, model1, model2):
+        m = model1.clone()
+        kp = model2.get_key_patches("diffusion_model.")
+        for k in kp:
+            m.add_patches({k: kp[k]}, 1.0, 1.0)
+        return (m, )
+
 class CLIPMergeSimple:
     @classmethod
     def INPUT_TYPES(s):
@@ -144,6 +182,8 @@ class CheckpointSave:
 NODE_CLASS_MAPPINGS = {
     "ModelMergeSimple": ModelMergeSimple,
     "ModelMergeBlocks": ModelMergeBlocks,
+    "ModelMergeSubtract": ModelSubtract,
+    "ModelMergeAdd": ModelAdd,
     "CheckpointSave": CheckpointSave,
     "CLIPMergeSimple": CLIPMergeSimple,
 }
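
A numeric sketch of what the two new nodes compute, assuming ModelPatcher.add_patches(patches, strength_patch, strength_model) resolves each patched weight as strength_model * base + strength_patch * patch:

    base, other, multiplier = 0.8, 0.5, 1.0

    # ModelSubtract: add_patches(..., -multiplier, multiplier)
    subtract = multiplier * base - multiplier * other  # multiplier * (model1 - model2)

    # ModelAdd: add_patches(..., 1.0, 1.0)
    add = 1.0 * base + 1.0 * other                     # model1 + model2

    print(subtract, add)  # 0.30000000000000004 1.3

Subtracting at multiplier 1.0 yields a "difference" model, which is why the node pairs naturally with ModelMergeAdd for re-applying that difference to another base model.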

comfy_extras/nodes_upscale_model.py

@@ -18,6 +18,8 @@ class UpscaleModelLoader:
     def load_model(self, model_name):
         model_path = folder_paths.get_full_path("upscale_models", model_name)
         sd = comfy.utils.load_torch_file(model_path, safe_load=True)
+        if "module.layers.0.residual_group.blocks.0.norm1.weight" in sd:
+            sd = comfy.utils.state_dict_prefix_replace(sd, {"module.":""})
         out = model_loading.load_state_dict(sd).eval()
         return (out, )

execution.py

@@ -21,7 +21,8 @@ def get_input_data(inputs, class_def, unique_id, outputs={}, prompt={}, extra_da
             input_unique_id = input_data[0]
             output_index = input_data[1]
             if input_unique_id not in outputs:
-                return None
+                input_data_all[x] = (None,)
+                continue
             obj = outputs[input_unique_id][output_index]
             input_data_all[x] = obj
         else:

folder_paths.py

@@ -1,14 +1,13 @@
 import os
 import time
 
-supported_ckpt_extensions = set(['.ckpt', '.pth', '.safetensors'])
 supported_pt_extensions = set(['.ckpt', '.pt', '.bin', '.pth', '.safetensors'])
 
 folder_names_and_paths = {}
 
 base_path = os.path.dirname(os.path.realpath(__file__))
 models_dir = os.path.join(base_path, "models")
-folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_ckpt_extensions)
+folder_names_and_paths["checkpoints"] = ([os.path.join(models_dir, "checkpoints")], supported_pt_extensions)
 folder_names_and_paths["configs"] = ([os.path.join(models_dir, "configs")], [".yaml"])
 
 folder_names_and_paths["loras"] = ([os.path.join(models_dir, "loras")], supported_pt_extensions)
@@ -121,17 +120,22 @@ def add_model_folder_path(folder_name, full_folder_path):
 def get_folder_paths(folder_name):
     return folder_names_and_paths[folder_name][0][:]
 
-def recursive_search(directory):
+def recursive_search(directory, excluded_dir_names=None):
     if not os.path.isdir(directory):
         return [], {}
+
+    if excluded_dir_names is None:
+        excluded_dir_names = []
+
     result = []
     dirs = {directory: os.path.getmtime(directory)}
-    for root, subdir, file in os.walk(directory, followlinks=True):
-        for filepath in file:
-            #we os.path.join directory with a blank string to generate a path separator at the end.
-            result.append(os.path.join(root, filepath).replace(os.path.join(directory,''),''))
-        for d in subdir:
-            path = os.path.join(root, d)
+    for dirpath, subdirs, filenames in os.walk(directory, followlinks=True, topdown=True):
+        subdirs[:] = [d for d in subdirs if d not in excluded_dir_names]
+        for file_name in filenames:
+            relative_path = os.path.relpath(os.path.join(dirpath, file_name), directory)
+            result.append(relative_path)
+        for d in subdirs:
+            path = os.path.join(dirpath, d)
             dirs[path] = os.path.getmtime(path)
     return result, dirs
@@ -159,7 +163,7 @@ def get_filename_list_(folder_name):
     folders = folder_names_and_paths[folder_name]
     output_folders = {}
     for x in folders[0]:
-        files, folders_all = recursive_search(x)
+        files, folders_all = recursive_search(x, excluded_dir_names=[".git"])
         output_list.update(filter_files_extensions(files, folders[1]))
         output_folders = {**output_folders, **folders_all}
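
A minimal sketch of the pruning idiom used above: with topdown=True, assigning to the subdirectory list in place stops os.walk from ever descending into excluded names, rather than filtering results after the fact:

    import os

    for dirpath, subdirs, filenames in os.walk(".", topdown=True):
        subdirs[:] = [d for d in subdirs if d not in (".git", "__pycache__")]
        for name in filenames:
            print(os.path.relpath(os.path.join(dirpath, name), "."))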

main.py

@@ -1,3 +1,6 @@
+import comfy.options
+comfy.options.enable_args_parsing()
+
 import os
 import importlib.util
 import folder_paths
@@ -104,6 +107,7 @@ async def run(server, address='', port=8188, verbose=True, call_on_start=None):
 
 def hijack_progress(server):
     def hook(value, total, preview_image):
+        comfy.model_management.throw_exception_if_processing_interrupted()
         server.send_sync("progress", {"value": value, "max": total}, server.client_id)
         if preview_image is not None:
             server.send_sync(BinaryEventTypes.UNENCODED_PREVIEW_IMAGE, preview_image, server.client_id)

nodes.py

@@ -159,6 +159,31 @@ class ConditioningSetArea:
             c.append(n)
         return (c, )
 
+class ConditioningSetAreaPercentage:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {"conditioning": ("CONDITIONING", ),
+                             "width": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "height": ("FLOAT", {"default": 1.0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "x": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "y": ("FLOAT", {"default": 0, "min": 0, "max": 1.0, "step": 0.01}),
+                             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+                             }}
+    RETURN_TYPES = ("CONDITIONING",)
+    FUNCTION = "append"
+
+    CATEGORY = "conditioning"
+
+    def append(self, conditioning, width, height, x, y, strength):
+        c = []
+        for t in conditioning:
+            n = [t[0], t[1].copy()]
+            n[1]['area'] = ("percentage", height, width, y, x)
+            n[1]['strength'] = strength
+            n[1]['set_area_to_bounds'] = False
+            c.append(n)
+        return (c, )
+
 class ConditioningSetMask:
     @classmethod
     def INPUT_TYPES(s):
@@ -1583,6 +1608,7 @@ NODE_CLASS_MAPPINGS = {
     "ConditioningCombine": ConditioningCombine,
     "ConditioningConcat": ConditioningConcat,
     "ConditioningSetArea": ConditioningSetArea,
+    "ConditioningSetAreaPercentage": ConditioningSetAreaPercentage,
     "ConditioningSetMask": ConditioningSetMask,
     "KSamplerAdvanced": KSamplerAdvanced,
     "SetLatentNoiseMask": SetLatentNoiseMask,
@@ -1644,6 +1670,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
    "ConditioningAverage ": "Conditioning (Average)",
    "ConditioningConcat": "Conditioning (Concat)",
    "ConditioningSetArea": "Conditioning (Set Area)",
+   "ConditioningSetAreaPercentage": "Conditioning (Set Area with Percentage)",
    "ConditioningSetMask": "Conditioning (Set Mask)",
    "ControlNetApply": "Apply ControlNet",
    "ControlNetApplyAdvanced": "Apply ControlNet (Advanced)",

server.py

@@ -170,15 +170,15 @@ class PromptServer():
             subfolder = post.get("subfolder", "")
             full_output_folder = os.path.join(upload_dir, os.path.normpath(subfolder))
+            filepath = os.path.abspath(os.path.join(full_output_folder, filename))
 
-            if os.path.commonpath((upload_dir, os.path.abspath(full_output_folder))) != upload_dir:
+            if os.path.commonpath((upload_dir, filepath)) != upload_dir:
                 return web.Response(status=400)
 
             if not os.path.exists(full_output_folder):
                 os.makedirs(full_output_folder)
 
             split = os.path.splitext(filename)
-            filepath = os.path.join(full_output_folder, filename)
 
             if overwrite is not None and (overwrite == "true" or overwrite == "1"):
                 pass
@@ -398,7 +398,7 @@ class PromptServer():
                 info['output_name'] = obj_class.RETURN_NAMES if hasattr(obj_class, 'RETURN_NAMES') else info['output']
                 info['name'] = node_class
                 info['display_name'] = nodes.NODE_DISPLAY_NAME_MAPPINGS[node_class] if node_class in nodes.NODE_DISPLAY_NAME_MAPPINGS.keys() else node_class
-                info['description'] = ''
+                info['description'] = obj_class.DESCRIPTION if hasattr(obj_class,'DESCRIPTION') else ''
                 info['category'] = 'sd'
                 if hasattr(obj_class, 'OUTPUT_NODE') and obj_class.OUTPUT_NODE == True:
                     info['output_node'] = True
@@ -603,7 +603,7 @@ class PromptServer():
         await self.send(*msg)
 
     async def start(self, address, port, verbose=True, call_on_start=None):
-        runner = web.AppRunner(self.app)
+        runner = web.AppRunner(self.app, access_log=None)
         await runner.setup()
         site = web.TCPSite(runner, address, port)
         await site.start()
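
A minimal sketch of the traversal check above: resolving the candidate path first and then requiring the upload root to be the common prefix rejects filenames or subfolders that escape via "..":

    import os

    upload_dir = os.path.abspath("input")
    for filename in ("photo.png", "../../etc/passwd"):
        filepath = os.path.abspath(os.path.join(upload_dir, "subdir", filename))
        ok = os.path.commonpath((upload_dir, filepath)) == upload_dir
        print(filename, "->", "accepted" if ok else "rejected (400)")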

web/extensions/core/widgetInputs.js

@@ -142,7 +142,7 @@ app.registerExtension({
         const r = origOnNodeCreated ? origOnNodeCreated.apply(this) : undefined;
         if (this.widgets) {
             for (const w of this.widgets) {
-                if (w?.options?.forceInput) {
+                if (w?.options?.forceInput || w?.options?.defaultInput) {
                     const config = nodeData?.input?.required[w.name] || nodeData?.input?.optional?.[w.name] || [w.type, w.options || {}];
                     convertToInput(this, w, config);
                 }

web/lib/litegraph.core.js

@@ -11529,7 +11529,7 @@ LGraphNode.prototype.executeAction = function(action)
                 if (timeout) {
                     clearInterval(timeout);
                 }
-                timeout = setTimeout(refreshHelper, 250);
+                timeout = setTimeout(refreshHelper, 10);
                 return;
             }
             e.preventDefault();

web/scripts/app.js

@@ -671,6 +671,10 @@ export class ComfyApp {
      */
     #addPasteHandler() {
         document.addEventListener("paste", (e) => {
+            // ctrl+shift+v is used to paste nodes with connections
+            // this is handled by litegraph
+            if (this.shiftDown) return;
+
             let data = (e.clipboardData || window.clipboardData);
             const items = data.items;
@@ -735,9 +739,17 @@ export class ComfyApp {
      */
     #addCopyHandler() {
         document.addEventListener("copy", (e) => {
-            // copy
+            if (e.target.type === "text" || e.target.type === "textarea") {
+                // Default system copy
+                return;
+            }
+            // copy nodes and clear clipboard
             if (this.canvas.selected_nodes) {
-                this.canvas.copyToClipboard();
+                this.canvas.copyToClipboard();
+                e.clipboardData.setData('text', ' '); //clearData doesn't remove images from clipboard
+                e.preventDefault();
+                e.stopImmediatePropagation();
+                return false;
             }
         });
     }
@@ -840,24 +852,14 @@ export class ComfyApp {
             // Ctrl+C Copy
             if ((e.key === 'c') && (e.metaKey || e.ctrlKey)) {
-                if (e.shiftKey) {
-                    this.copyToClipboard(true);
-                    block_default = true;
-                }
-                // Trigger default onCopy
+                // Trigger onCopy
                 return true;
             }
 
             // Ctrl+V Paste
-            if ((e.key === 'v') && (e.metaKey || e.ctrlKey)) {
-                if (e.shiftKey) {
-                    this.pasteFromClipboard(true);
-                    block_default = true;
-                }
-                else {
-                    // Trigger default onPaste
-                    return true;
-                }
+            if ((e.key === 'v' || e.key == 'V') && (e.metaKey || e.ctrlKey) && !e.shiftKey) {
+                // Trigger onPaste
+                return true;
             }
         }
@@ -1248,6 +1250,10 @@ export class ComfyApp {
                 if (!config.widget.options) config.widget.options = {};
                 config.widget.options.forceInput = inputData[1].forceInput;
             }
+            if (widgetCreated && inputData[1]?.defaultInput && config?.widget) {
+                if (!config.widget.options) config.widget.options = {};
+                config.widget.options.defaultInput = inputData[1].defaultInput;
+            }
         }
 
         for (const o in nodeData["output"]) {

web/scripts/widgets.js

@@ -8,8 +8,13 @@ function getNumberDefaults(inputData, defaultStep) {
     if (min == undefined) min = 0;
     if (max == undefined) max = 2048;
     if (step == undefined) step = defaultStep;
+    // precision is the number of decimal places to show.
+    // by default, display the smallest number of decimal places such that changes of size step are visible.
+    let precision = Math.max(-Math.floor(Math.log10(step)), 0);
+    // by default, round the value to those decimal places shown.
+    let round = Math.round(1000000 * Math.pow(0.1, precision)) / 1000000;
 
-    return { val: defaultVal, config: { min, max, step: 10.0 * step } };
+    return { val: defaultVal, config: { min, max, step: 10.0 * step, round, precision } };
 }
 
@@ -264,7 +269,10 @@ export const ComfyWidgets = {
     FLOAT(node, inputName, inputData, app) {
         let widgetType = isSlider(inputData[1]["display"], app);
         const { val, config } = getNumberDefaults(inputData, 0.5);
-        return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) };
+        return { widget: node.addWidget(widgetType, inputName, val,
+            function (v) {
+                this.value = Math.round(v / config.round) * config.round;
+            }, config) };
     },
     INT(node, inputName, inputData, app) {
         let widgetType = isSlider(inputData[1]["display"], app);
@@ -335,7 +343,7 @@ export const ComfyWidgets = {
                 subfolder = name.substring(0, folder_separator);
                 name = name.substring(folder_separator + 1);
             }
-            img.src = api.apiURL(`/view?filename=${name}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
+            img.src = api.apiURL(`/view?filename=${encodeURIComponent(name)}&type=input&subfolder=${subfolder}${app.getPreviewFormatParam()}`);
            node.setSizeForImage?.();
        }