Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-01-26 06:10:15 +08:00)

Commit d15ce39530: Merge branch 'comfyanonymous:master' into master

@@ -8,11 +8,7 @@ from typing import Callable, Tuple, List
 import numpy as np
 import torch.nn.functional as F
-from torch.nn.utils import weight_norm
 from torch.nn.utils.parametrize import remove_parametrizations as remove_weight_norm
-# from diffusers.models.modeling_utils import ModelMixin
-# from diffusers.loaders import FromOriginalModelMixin
-# from diffusers.configuration_utils import ConfigMixin, register_to_config
 
 from .music_log_mel import LogMelSpectrogram
 
@@ -259,7 +255,7 @@ class ResBlock1(torch.nn.Module):
 
         self.convs1 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -269,7 +265,7 @@ class ResBlock1(torch.nn.Module):
                         padding=get_padding(kernel_size, dilation[0]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -279,7 +275,7 @@ class ResBlock1(torch.nn.Module):
                         padding=get_padding(kernel_size, dilation[1]),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -294,7 +290,7 @@ class ResBlock1(torch.nn.Module):
 
         self.convs2 = nn.ModuleList(
             [
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -304,7 +300,7 @@ class ResBlock1(torch.nn.Module):
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -314,7 +310,7 @@ class ResBlock1(torch.nn.Module):
                         padding=get_padding(kernel_size, 1),
                     )
                 ),
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.Conv1d(
                         channels,
                         channels,
@@ -366,7 +362,7 @@ class HiFiGANGenerator(nn.Module):
             prod(upsample_rates) == hop_length
         ), f"hop_length must be {prod(upsample_rates)}"
 
-        self.conv_pre = weight_norm(
+        self.conv_pre = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 num_mels,
                 upsample_initial_channel,
@@ -386,7 +382,7 @@ class HiFiGANGenerator(nn.Module):
         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
             c_cur = upsample_initial_channel // (2 ** (i + 1))
             self.ups.append(
-                weight_norm(
+                torch.nn.utils.parametrizations.weight_norm(
                     ops.ConvTranspose1d(
                         upsample_initial_channel // (2**i),
                         upsample_initial_channel // (2 ** (i + 1)),
@@ -421,7 +417,7 @@ class HiFiGANGenerator(nn.Module):
             self.resblocks.append(ResBlock1(ch, k, d))
 
         self.activation_post = post_activation()
-        self.conv_post = weight_norm(
+        self.conv_post = torch.nn.utils.parametrizations.weight_norm(
             ops.Conv1d(
                 ch,
                 1,
@@ -75,16 +75,10 @@ class SnakeBeta(nn.Module):
         return x
 
 def WNConv1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.Conv1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.Conv1d(*args, **kwargs))
 
 def WNConvTranspose1d(*args, **kwargs):
-    try:
-        return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))
-    except:
-        return torch.nn.utils.weight_norm(ops.ConvTranspose1d(*args, **kwargs)) #support pytorch 2.1 and older
+    return torch.nn.utils.parametrizations.weight_norm(ops.ConvTranspose1d(*args, **kwargs))
 
 def get_activation(activation: Literal["elu", "snake", "none"], antialias=False, channels=None) -> nn.Module:
     if activation == "elu":
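These weight_norm hunks all make the same move: call sites switch from the deprecated torch.nn.utils.weight_norm helper to the parametrization-based torch.nn.utils.parametrizations.weight_norm, and the WNConv1d/WNConvTranspose1d wrappers drop their try/except fallback for PyTorch 2.1 and older. A minimal sketch of the new API, assuming a recent PyTorch build; the toy Conv1d below is illustrative and not taken from the diff:

import torch
import torch.nn as nn
from torch.nn.utils.parametrize import remove_parametrizations

# Register weight normalization as a parametrization: the layer's weight is
# recomputed from learned magnitude/direction tensors on every forward pass.
conv = torch.nn.utils.parametrizations.weight_norm(nn.Conv1d(4, 4, 3))
out = conv(torch.randn(1, 4, 16))

# For inference the parametrization can be folded back into a plain weight,
# which is what the vocoder's remove_weight_norm alias (remove_parametrizations) does.
remove_parametrizations(conv, "weight")
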
@@ -671,7 +671,6 @@ class KlingImage2VideoNode(KlingNodeBase):
             negative_prompt=negative_prompt if negative_prompt else None,
             cfg_scale=cfg_scale,
             mode=KlingVideoGenMode(mode),
-            aspect_ratio=KlingVideoGenAspectRatio(aspect_ratio),
             duration=KlingVideoGenDuration(duration),
             camera_control=camera_control,
         ),
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import av
 import torchaudio
 import torch
 import comfy.model_management
@@ -7,7 +8,6 @@ import folder_paths
 import os
 import io
 import json
-import struct
 import random
 import hashlib
 import node_helpers
@@ -90,60 +90,118 @@ class VAEDecodeAudio:
         return ({"waveform": audio, "sample_rate": 44100}, )
 
 
-def create_vorbis_comment_block(comment_dict, last_block):
-    vendor_string = b'ComfyUI'
-    vendor_length = len(vendor_string)
-
-    comments = []
-    for key, value in comment_dict.items():
-        comment = f"{key}={value}".encode('utf-8')
-        comments.append(struct.pack('<I', len(comment)) + comment)
-
-    user_comment_list_length = len(comments)
-    user_comments = b''.join(comments)
-
-    comment_data = struct.pack('<I', vendor_length) + vendor_string + struct.pack('<I', user_comment_list_length) + user_comments
-    if last_block:
-        id = b'\x84'
-    else:
-        id = b'\x04'
-    comment_block = id + struct.pack('>I', len(comment_data))[1:] + comment_data
-
-    return comment_block
-
-
-def insert_or_replace_vorbis_comment(flac_io, comment_dict):
-    if len(comment_dict) == 0:
-        return flac_io
-
-    flac_io.seek(4)
-
-    blocks = []
-    last_block = False
-
-    while not last_block:
-        header = flac_io.read(4)
-        last_block = (header[0] & 0x80) != 0
-        block_type = header[0] & 0x7F
-        block_length = struct.unpack('>I', b'\x00' + header[1:])[0]
-        block_data = flac_io.read(block_length)
-
-        if block_type == 4 or block_type == 1:
-            pass
-        else:
-            header = bytes([(header[0] & (~0x80))]) + header[1:]
-            blocks.append(header + block_data)
-
-    blocks.append(create_vorbis_comment_block(comment_dict, last_block=True))
-
-    new_flac_io = io.BytesIO()
-    new_flac_io.write(b'fLaC')
-    for block in blocks:
-        new_flac_io.write(block)
-
-    new_flac_io.write(flac_io.read())
-    return new_flac_io
+def save_audio(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None, quality="128k"):
+
+    filename_prefix += self.prefix_append
+    full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
+    results: list[FileLocator] = []
+
+    # Prepare metadata dictionary
+    metadata = {}
+    if not args.disable_metadata:
+        if prompt is not None:
+            metadata["prompt"] = json.dumps(prompt)
+        if extra_pnginfo is not None:
+            for x in extra_pnginfo:
+                metadata[x] = json.dumps(extra_pnginfo[x])
+
+    # Opus supported sample rates
+    OPUS_RATES = [8000, 12000, 16000, 24000, 48000]
+
+    for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
+        filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
+        file = f"{filename_with_batch_num}_{counter:05}_.{format}"
+        output_path = os.path.join(full_output_folder, file)
+
+        # Use original sample rate initially
+        sample_rate = audio["sample_rate"]
+
+        # Handle Opus sample rate requirements
+        if format == "opus":
+            if sample_rate > 48000:
+                sample_rate = 48000
+            elif sample_rate not in OPUS_RATES:
+                # Find the next highest supported rate
+                for rate in sorted(OPUS_RATES):
+                    if rate > sample_rate:
+                        sample_rate = rate
+                        break
+                if sample_rate not in OPUS_RATES:  # Fallback if still not supported
+                    sample_rate = 48000
+
+            # Resample if necessary
+            if sample_rate != audio["sample_rate"]:
+                waveform = torchaudio.functional.resample(waveform, audio["sample_rate"], sample_rate)
+
+        # Create in-memory WAV buffer
+        wav_buffer = io.BytesIO()
+        torchaudio.save(wav_buffer, waveform, sample_rate, format="WAV")
+        wav_buffer.seek(0)  # Rewind for reading
+
+        # Use PyAV to convert and add metadata
+        input_container = av.open(wav_buffer)
+
+        # Create output with specified format
+        output_buffer = io.BytesIO()
+        output_container = av.open(output_buffer, mode='w', format=format)
+
+        # Set metadata on the container
+        for key, value in metadata.items():
+            output_container.metadata[key] = value
+
+        # Set up the output stream with appropriate properties
+        input_container.streams.audio[0]
+        if format == "opus":
+            out_stream = output_container.add_stream("libopus", rate=sample_rate)
+            if quality == "64k":
+                out_stream.bit_rate = 64000
+            elif quality == "96k":
+                out_stream.bit_rate = 96000
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "192k":
+                out_stream.bit_rate = 192000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        elif format == "mp3":
+            out_stream = output_container.add_stream("libmp3lame", rate=sample_rate)
+            if quality == "V0":
+                #TODO i would really love to support V3 and V5 but there doesn't seem to be a way to set the qscale level, the property below is a bool
+                out_stream.codec_context.qscale = 1
+            elif quality == "128k":
+                out_stream.bit_rate = 128000
+            elif quality == "320k":
+                out_stream.bit_rate = 320000
+        else: #format == "flac":
+            out_stream = output_container.add_stream("flac", rate=sample_rate)
+
+        # Copy frames from input to output
+        for frame in input_container.decode(audio=0):
+            frame.pts = None  # Let PyAV handle timestamps
+            output_container.mux(out_stream.encode(frame))
+
+        # Flush encoder
+        output_container.mux(out_stream.encode(None))
+
+        # Close containers
+        output_container.close()
+        input_container.close()
+
+        # Write the output to file
+        output_buffer.seek(0)
+        with open(output_path, 'wb') as f:
+            f.write(output_buffer.getbuffer())
+
+        results.append({
+            "filename": file,
+            "subfolder": subfolder,
+            "type": self.type
+        })
+        counter += 1
+
+    return { "ui": { "audio": results } }
 
 class SaveAudio:
     def __init__(self):
         self.output_dir = folder_paths.get_output_directory()
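The new module-level save_audio() funnels every format through the same path: write the waveform to an in-memory WAV with torchaudio, then let PyAV re-encode it with the requested codec and attach the workflow metadata. The one format-specific rule is the Opus sample-rate constraint, since libopus only accepts a fixed set of rates. A standalone sketch of that selection rule (pick_opus_rate is a hypothetical helper, not part of the diff), convenient for checking the behaviour in isolation:

OPUS_RATES = [8000, 12000, 16000, 24000, 48000]  # rates libopus accepts

def pick_opus_rate(sample_rate: int) -> int:
    # Same policy as the hunk above: cap at 48 kHz, keep already-supported
    # rates, otherwise round up to the next supported rate.
    if sample_rate > 48000:
        return 48000
    if sample_rate in OPUS_RATES:
        return sample_rate
    for rate in sorted(OPUS_RATES):
        if rate > sample_rate:
            return rate
    return 48000  # fallback if still not supported

assert pick_opus_rate(44100) == 48000
assert pick_opus_rate(22050) == 24000
assert pick_opus_rate(96000) == 48000
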
@@ -153,50 +211,70 @@ class SaveAudio:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": { "audio": ("AUDIO", ),
-                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"})},
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                              },
                 "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
                 }
 
     RETURN_TYPES = ()
-    FUNCTION = "save_audio"
+    FUNCTION = "save_flac"
 
     OUTPUT_NODE = True
 
     CATEGORY = "audio"
 
-    def save_audio(self, audio, filename_prefix="ComfyUI", prompt=None, extra_pnginfo=None):
-        filename_prefix += self.prefix_append
-        full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
-        results: list[FileLocator] = []
-
-        metadata = {}
-        if not args.disable_metadata:
-            if prompt is not None:
-                metadata["prompt"] = json.dumps(prompt)
-            if extra_pnginfo is not None:
-                for x in extra_pnginfo:
-                    metadata[x] = json.dumps(extra_pnginfo[x])
-
-        for (batch_number, waveform) in enumerate(audio["waveform"].cpu()):
-            filename_with_batch_num = filename.replace("%batch_num%", str(batch_number))
-            file = f"{filename_with_batch_num}_{counter:05}_.flac"
-
-            buff = io.BytesIO()
-            torchaudio.save(buff, waveform, audio["sample_rate"], format="FLAC")
-
-            buff = insert_or_replace_vorbis_comment(buff, metadata)
-
-            with open(os.path.join(full_output_folder, file), 'wb') as f:
-                f.write(buff.getbuffer())
-
-            results.append({
-                "filename": file,
-                "subfolder": subfolder,
-                "type": self.type
-            })
-            counter += 1
-
-        return { "ui": { "audio": results } }
+    def save_flac(self, audio, filename_prefix="ComfyUI", format="flac", prompt=None, extra_pnginfo=None):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo)
+
+
+class SaveAudioMP3:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "audio": ("AUDIO", ),
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                              "quality": (["V0", "128k", "320k"], {"default": "V0"}),
+                              },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "save_mp3"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "audio"
+
+    def save_mp3(self, audio, filename_prefix="ComfyUI", format="mp3", prompt=None, extra_pnginfo=None, quality="128k"):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)
+
+
+class SaveAudioOpus:
+    def __init__(self):
+        self.output_dir = folder_paths.get_output_directory()
+        self.type = "output"
+        self.prefix_append = ""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": { "audio": ("AUDIO", ),
+                              "filename_prefix": ("STRING", {"default": "audio/ComfyUI"}),
+                              "quality": (["64k", "96k", "128k", "192k", "320k"], {"default": "128k"}),
+                              },
+                "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
+                }
+
+    RETURN_TYPES = ()
+    FUNCTION = "save_opus"
+
+    OUTPUT_NODE = True
+
+    CATEGORY = "audio"
+
+    def save_opus(self, audio, filename_prefix="ComfyUI", format="opus", prompt=None, extra_pnginfo=None, quality="V3"):
+        return save_audio(self, audio, filename_prefix, format, prompt, extra_pnginfo, quality)
 
 class PreviewAudio(SaveAudio):
     def __init__(self):
@@ -248,7 +326,20 @@ NODE_CLASS_MAPPINGS = {
     "VAEEncodeAudio": VAEEncodeAudio,
     "VAEDecodeAudio": VAEDecodeAudio,
     "SaveAudio": SaveAudio,
+    "SaveAudioMP3": SaveAudioMP3,
+    "SaveAudioOpus": SaveAudioOpus,
     "LoadAudio": LoadAudio,
     "PreviewAudio": PreviewAudio,
     "ConditioningStableAudio": ConditioningStableAudio,
 }
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+    "EmptyLatentAudio": "Empty Latent Audio",
+    "VAEEncodeAudio": "VAE Encode Audio",
+    "VAEDecodeAudio": "VAE Decode Audio",
+    "PreviewAudio": "Preview Audio",
+    "LoadAudio": "Load Audio",
+    "SaveAudio": "Save Audio (FLAC)",
+    "SaveAudioMP3": "Save Audio (MP3)",
+    "SaveAudioOpus": "Save Audio (Opus)",
+}

@@ -2,6 +2,10 @@ import nodes
 import folder_paths
 import os
 
+from comfy.comfy_types import IO
+from comfy_api.input_impl import VideoFromFile
+
+
 def normalize_path(path):
     return path.replace('\\', '/')
 
@@ -21,8 +25,8 @@ class Load3D():
             "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
         }}
 
-    RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA")
-    RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info")
+    RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO)
+    RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "lineart", "camera_info", "recording_video")
 
     FUNCTION = "process"
     EXPERIMENTAL = True
@@ -41,7 +45,14 @@ class Load3D():
         normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)
         lineart_image, ignore_mask3 = load_image_node.load_image(image=lineart_path)
 
-        return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info']
+        video = None
+
+        if image['recording'] != "":
+            recording_video_path = folder_paths.get_annotated_filepath(image['recording'])
+
+            video = VideoFromFile(recording_video_path)
+
+        return output_image, output_mask, model_file, normal_image, lineart_image, image['camera_info'], video
 
 class Load3DAnimation():
     @classmethod
@@ -59,8 +70,8 @@ class Load3DAnimation():
             "height": ("INT", {"default": 1024, "min": 1, "max": 4096, "step": 1}),
         }}
 
-    RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA")
-    RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info")
+    RETURN_TYPES = ("IMAGE", "MASK", "STRING", "IMAGE", "LOAD3D_CAMERA", IO.VIDEO)
+    RETURN_NAMES = ("image", "mask", "mesh_path", "normal", "camera_info", "recording_video")
 
     FUNCTION = "process"
     EXPERIMENTAL = True
@@ -77,7 +88,14 @@ class Load3DAnimation():
         ignore_image, output_mask = load_image_node.load_image(image=mask_path)
         normal_image, ignore_mask2 = load_image_node.load_image(image=normal_path)
 
-        return output_image, output_mask, model_file, normal_image, image['camera_info']
+        video = None
+
+        if image['recording'] != "":
+            recording_video_path = folder_paths.get_annotated_filepath(image['recording'])
+
+            video = VideoFromFile(recording_video_path)
+
+        return output_image, output_mask, model_file, normal_image, image['camera_info'], video
 
 class Preview3D():
     @classmethod
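Both Load3D hunks wire up the same optional output: when the 3D viewer has saved a turnaround recording, its path arrives in image['recording'], is resolved through folder_paths, and is wrapped in a VideoFromFile for the new IO.VIDEO socket; otherwise the socket carries None. A hedged sketch of that guard on its own (the helper name is illustrative, and it assumes ComfyUI's folder_paths and comfy_api packages are importable):

import folder_paths
from comfy_api.input_impl import VideoFromFile

def recording_to_video(image: dict):
    # An empty 'recording' string means nothing was captured in the viewer,
    # so the recording_video output simply stays None.
    if image.get('recording'):
        recording_video_path = folder_paths.get_annotated_filepath(image['recording'])
        return VideoFromFile(recording_video_path)
    return None
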
comfy_extras/nodes_string.py (new file, 322 lines)
@@ -0,0 +1,322 @@
import re

from comfy.comfy_types.node_typing import IO

class StringConcatenate():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string_a": (IO.STRING, {"multiline": True}),
                "string_b": (IO.STRING, {"multiline": True})
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string_a, string_b, **kwargs):
        return string_a + string_b,

class StringSubstring():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "start": (IO.INT, {}),
                "end": (IO.INT, {}),
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, start, end, **kwargs):
        return string[start:end],

class StringLength():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True})
            }
        }

    RETURN_TYPES = (IO.INT,)
    RETURN_NAMES = ("length",)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, **kwargs):
        length = len(string)

        return length,

class CaseConverter():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "mode": (IO.COMBO, {"options": ["UPPERCASE", "lowercase", "Capitalize", "Title Case"]})
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, mode, **kwargs):
        if mode == "UPPERCASE":
            result = string.upper()
        elif mode == "lowercase":
            result = string.lower()
        elif mode == "Capitalize":
            result = string.capitalize()
        elif mode == "Title Case":
            result = string.title()
        else:
            result = string

        return result,


class StringTrim():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "mode": (IO.COMBO, {"options": ["Both", "Left", "Right"]})
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, mode, **kwargs):
        if mode == "Both":
            result = string.strip()
        elif mode == "Left":
            result = string.lstrip()
        elif mode == "Right":
            result = string.rstrip()
        else:
            result = string

        return result,

class StringReplace():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "find": (IO.STRING, {"multiline": True}),
                "replace": (IO.STRING, {"multiline": True})
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, find, replace, **kwargs):
        result = string.replace(find, replace)
        return result,


class StringContains():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "substring": (IO.STRING, {"multiline": True}),
                "case_sensitive": (IO.BOOLEAN, {"default": True})
            }
        }

    RETURN_TYPES = (IO.BOOLEAN,)
    RETURN_NAMES = ("contains",)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, substring, case_sensitive, **kwargs):
        if case_sensitive:
            contains = substring in string
        else:
            contains = substring.lower() in string.lower()

        return contains,


class StringCompare():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string_a": (IO.STRING, {"multiline": True}),
                "string_b": (IO.STRING, {"multiline": True}),
                "mode": (IO.COMBO, {"options": ["Starts With", "Ends With", "Equal"]}),
                "case_sensitive": (IO.BOOLEAN, {"default": True})
            }
        }

    RETURN_TYPES = (IO.BOOLEAN,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string_a, string_b, mode, case_sensitive, **kwargs):
        if case_sensitive:
            a = string_a
            b = string_b
        else:
            a = string_a.lower()
            b = string_b.lower()

        if mode == "Equal":
            return a == b,
        elif mode == "Starts With":
            return a.startswith(b),
        elif mode == "Ends With":
            return a.endswith(b),

class RegexMatch():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "regex_pattern": (IO.STRING, {"multiline": True}),
                "case_insensitive": (IO.BOOLEAN, {"default": True}),
                "multiline": (IO.BOOLEAN, {"default": False}),
                "dotall": (IO.BOOLEAN, {"default": False})
            }
        }

    RETURN_TYPES = (IO.BOOLEAN,)
    RETURN_NAMES = ("matches",)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, regex_pattern, case_insensitive, multiline, dotall, **kwargs):
        flags = 0

        if case_insensitive:
            flags |= re.IGNORECASE
        if multiline:
            flags |= re.MULTILINE
        if dotall:
            flags |= re.DOTALL

        try:
            match = re.search(regex_pattern, string, flags)
            result = match is not None

        except re.error:
            result = False

        return result,


class RegexExtract():
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "string": (IO.STRING, {"multiline": True}),
                "regex_pattern": (IO.STRING, {"multiline": True}),
                "mode": (IO.COMBO, {"options": ["First Match", "All Matches", "First Group", "All Groups"]}),
                "case_insensitive": (IO.BOOLEAN, {"default": True}),
                "multiline": (IO.BOOLEAN, {"default": False}),
                "dotall": (IO.BOOLEAN, {"default": False}),
                "group_index": (IO.INT, {"default": 1, "min": 0, "max": 100})
            }
        }

    RETURN_TYPES = (IO.STRING,)
    FUNCTION = "execute"
    CATEGORY = "utils/string"

    def execute(self, string, regex_pattern, mode, case_insensitive, multiline, dotall, group_index, **kwargs):
        join_delimiter = "\n"

        flags = 0
        if case_insensitive:
            flags |= re.IGNORECASE
        if multiline:
            flags |= re.MULTILINE
        if dotall:
            flags |= re.DOTALL

        try:
            if mode == "First Match":
                match = re.search(regex_pattern, string, flags)
                if match:
                    result = match.group(0)
                else:
                    result = ""

            elif mode == "All Matches":
                matches = re.findall(regex_pattern, string, flags)
                if matches:
                    if isinstance(matches[0], tuple):
                        result = join_delimiter.join([m[0] for m in matches])
                    else:
                        result = join_delimiter.join(matches)
                else:
                    result = ""

            elif mode == "First Group":
                match = re.search(regex_pattern, string, flags)
                if match and len(match.groups()) >= group_index:
                    result = match.group(group_index)
                else:
                    result = ""

            elif mode == "All Groups":
                matches = re.finditer(regex_pattern, string, flags)
                results = []
                for match in matches:
                    if match.groups() and len(match.groups()) >= group_index:
                        results.append(match.group(group_index))
                result = join_delimiter.join(results)
            else:
                result = ""

        except re.error:
            result = ""

        return result,

NODE_CLASS_MAPPINGS = {
    "StringConcatenate": StringConcatenate,
    "StringSubstring": StringSubstring,
    "StringLength": StringLength,
    "CaseConverter": CaseConverter,
    "StringTrim": StringTrim,
    "StringReplace": StringReplace,
    "StringContains": StringContains,
    "StringCompare": StringCompare,
    "RegexMatch": RegexMatch,
    "RegexExtract": RegexExtract
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "StringConcatenate": "Concatenate",
    "StringSubstring": "Substring",
    "StringLength": "Length",
    "CaseConverter": "Case Converter",
    "StringTrim": "Trim",
    "StringReplace": "Replace",
    "StringContains": "Contains",
    "StringCompare": "Compare",
    "RegexMatch": "Regex Match",
    "RegexExtract": "Regex Extract"
}
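The regex nodes in this new file are thin wrappers over Python's re module, so their behaviour can be sanity-checked outside ComfyUI. For example, RegexExtract with mode "All Groups" and group_index 1 reduces to collecting the first capture group of every match and joining the results with newlines (the pattern and text below are made-up examples, not from the diff):

import re

pattern = r"(\w+)=(\d+)"                 # two capture groups per match
text = "width=1024 height=768"
result = "\n".join(m.group(1) for m in re.finditer(pattern, text)
                   if m.groups() and len(m.groups()) >= 1)
print(result)  # "width" and "height" on separate lines
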
@@ -1,3 +1,3 @@
 # This file is automatically generated by the build process when version is
 # updated in pyproject.toml.
-__version__ = "0.3.33"
+__version__ = "0.3.34"

nodes.py (1 line added)
@@ -2263,6 +2263,7 @@ def init_builtin_extra_nodes():
         "nodes_fresca.py",
         "nodes_preview_any.py",
         "nodes_ace.py",
+        "nodes_string.py",
     ]
 
     import_failed = []
@@ -1,6 +1,6 @@
 [project]
 name = "ComfyUI"
-version = "0.3.33"
+version = "0.3.34"
 readme = "README.md"
 license = { file = "LICENSE" }
 requires-python = ">=3.9"
@@ -1,4 +1,4 @@
-comfyui-frontend-package==1.18.10
+comfyui-frontend-package==1.19.9
 comfyui-workflow-templates==0.1.14
 torch
 torchsde