Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2026-02-10 13:32:36 +08:00)

Merge branch 'comfyanonymous:master' into fix/secure-combo

This commit is contained in: commit e424c34d55
@@ -126,10 +126,10 @@ After this you should have everything installed and can proceed to running ComfyUI

 You can install ComfyUI in Apple Mac silicon (M1 or M2) with any recent macOS version.

-1. Install pytorch. For instructions, read the [Accelerated PyTorch training on Mac](https://developer.apple.com/metal/pytorch/) Apple Developer guide.
+1. Install pytorch nightly. For instructions, read the [Accelerated PyTorch training on Mac](https://developer.apple.com/metal/pytorch/) Apple Developer guide (make sure to install the latest pytorch nightly).
 1. Follow the [ComfyUI manual installation](#manual-install-windows-linux) instructions for Windows and Linux.
 1. Install the ComfyUI [dependencies](#dependencies). If you have another Stable Diffusion UI [you might be able to reuse the dependencies](#i-already-have-another-ui-for-stable-diffusion-installed-do-i-really-have-to-install-all-of-these-dependencies).
-1. Launch ComfyUI by running `python main.py`.
+1. Launch ComfyUI by running `python main.py --force-fp16`. Note that --force-fp16 will only work if you installed the latest pytorch nightly.

 > **Note**: Remember to add your models, VAE, LoRAs etc. to the corresponding Comfy folders, as discussed in [ComfyUI manual installation](#manual-install-windows-linux).
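The updated install step assumes a recent PyTorch nightly with working MPS support. As a quick sanity check before launching with `--force-fp16`, a short snippet like the following (not part of this diff, just a hedged sketch using PyTorch's public MPS flags) confirms the Apple-silicon backend is visible:

```python
# Sanity check (not from the diff): verify the MPS backend before launching
# ComfyUI with --force-fp16 on Apple silicon.
import torch

if torch.backends.mps.is_available():
    # MPS is usable; fp16 on a recent nightly should work.
    x = torch.ones(4, device="mps", dtype=torch.float16)
    print("MPS available, fp16 tensor created:", x.dtype)
else:
    # Falls back to CPU; --force-fp16 is unlikely to help here.
    print("MPS not available; PyTorch built with MPS:", torch.backends.mps.is_built())
```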
@@ -364,7 +364,8 @@ def text_encoder_device():
     if args.gpu_only:
         return get_torch_device()
     elif vram_state == VRAMState.HIGH_VRAM or vram_state == VRAMState.NORMAL_VRAM:
-        if torch.get_num_threads() < 4: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
+        #NOTE: on a Ryzen 5 7600X with 4080 it's faster to shift to GPU
+        if torch.get_num_threads() < 8: #leaving the text encoder on the CPU is faster than shifting it if the CPU is fast enough.
             return get_torch_device()
         else:
             return torch.device("cpu")
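This model-management hunk raises the thread-count threshold from 4 to 8: with fewer than 8 torch threads the CPU is treated as too slow, so the text encoder is shifted to the GPU; otherwise it stays on the CPU. A standalone sketch of that heuristic (simplified, with the VRAM-state and `args` plumbing replaced by plain parameters, so it is not the actual ComfyUI function):

```python
# Simplified sketch of the device-selection heuristic in this hunk.
import torch

def pick_text_encoder_device(gpu_only: bool = False, high_or_normal_vram: bool = True):
    gpu = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if gpu_only:
        return gpu
    if high_or_normal_vram:
        # Fewer than 8 torch threads: assume the CPU is too slow to run the
        # text encoder, so shift it to the GPU (the threshold was 4 before).
        if torch.get_num_threads() < 8:
            return gpu
        return torch.device("cpu")
    return torch.device("cpu")

print(pick_text_encoder_device())
```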
main.py (2 changed lines)

@@ -160,6 +160,8 @@ if __name__ == "__main__":
     if args.auto_launch:
         def startup_server(address, port):
             import webbrowser
+            if os.name == 'nt' and address == '0.0.0.0':
+                address = '127.0.0.1'
             webbrowser.open(f"http://{address}:{port}")
         call_on_start = startup_server
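The two added lines fix auto-launch on Windows when the server is bound to `0.0.0.0`: that address is only a bind-all wildcard, not something a Windows browser can navigate to, so the loopback address is substituted before opening the page. A minimal, self-contained sketch of the patched helper (the `8188` port in the usage line is just ComfyUI's usual default, not part of the diff):

```python
# Minimal sketch of the patched auto-launch helper from main.py.
import os
import webbrowser

def startup_server(address: str, port: int) -> None:
    # 0.0.0.0 means "listen on all interfaces"; on Windows the browser cannot
    # connect to it, so open the loopback address instead.
    if os.name == 'nt' and address == '0.0.0.0':
        address = '127.0.0.1'
    webbrowser.open(f"http://{address}:{port}")

# Example: server bound to all interfaces, browser opens the loopback URL.
startup_server('0.0.0.0', 8188)
```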
nodes.py (10 changed lines)

@@ -362,6 +362,14 @@ class SaveLatent:
                 metadata[x] = json.dumps(extra_pnginfo[x])

         file = f"{filename}_{counter:05}_.latent"
+
+        results = list()
+        results.append({
+            "filename": file,
+            "subfolder": subfolder,
+            "type": "output"
+        })
+
         file = os.path.join(full_output_folder, file)

         output = {}

@@ -369,7 +377,7 @@ class SaveLatent:
         output["latent_format_version_0"] = torch.tensor([])

         comfy.utils.save_torch_file(output, file, metadata=metadata)
-        return {}
+        return { "ui": { "latents": results } }


 class LoadLatent:
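The nodes.py change makes `SaveLatent` report the file it wrote back to the frontend by returning a `"ui"` payload instead of an empty dict, so the UI can list the saved latent. A hedged sketch of the same pattern with a hypothetical node (the node name, its inputs, and the JSON payload are illustrative, not from the diff):

```python
# Sketch of the "ui" return pattern this hunk adopts: an output node returns a
# description of the files it wrote so the frontend can display them.
import os
import json

class ExampleSaveNode:  # hypothetical node, simplified (no INPUT_TYPES shown)
    RETURN_TYPES = ()
    FUNCTION = "save"
    OUTPUT_NODE = True
    CATEGORY = "example"

    def save(self, data: dict, output_dir: str = "output", filename: str = "example"):
        os.makedirs(output_dir, exist_ok=True)
        file = f"{filename}_00001_.json"
        results = [{
            "filename": file,   # file name relative to the subfolder
            "subfolder": "",    # subfolder under the output directory
            "type": "output",   # tells the UI which root folder to look in
        }]
        with open(os.path.join(output_dir, file), "w") as f:
            json.dump(data, f)
        # Returning {"ui": ...} instead of {} is the actual change in this hunk.
        return {"ui": {"latents": results}}
```

A real ComfyUI node would also define `INPUT_TYPES` and be registered in the node mappings; those parts are omitted here to keep the sketch focused on the return value.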
@@ -250,19 +250,25 @@ function addMultilineWidget(node, name, opts, app) {
    return { minWidth: 400, minHeight: 200, widget };
 }

+function isSlider(display) {
+   return (display==="slider") ? "slider" : "number"
+}
+
 export const ComfyWidgets = {
    "INT:seed": seedWidget,
    "INT:noise_seed": seedWidget,
    FLOAT(node, inputName, inputData) {
+       let widgetType = isSlider(inputData[1]["display"]);
        const { val, config } = getNumberDefaults(inputData, 0.5);
-       return { widget: node.addWidget("number", inputName, val, () => {}, config) };
+       return { widget: node.addWidget(widgetType, inputName, val, () => {}, config) };
    },
    INT(node, inputName, inputData) {
+       let widgetType = isSlider(inputData[1]["display"]);
        const { val, config } = getNumberDefaults(inputData, 1);
        Object.assign(config, { precision: 0 });
        return {
            widget: node.addWidget(
-               "number",
+               widgetType,
                inputName,
                val,
                function (v) {

@@ -273,6 +279,18 @@ export const ComfyWidgets = {
            ),
        };
    },
+   BOOLEAN(node, inputName, inputData) {
+       let defaultVal = inputData[1]["default"];
+       return {
+           widget: node.addWidget(
+               "toggle",
+               inputName,
+               defaultVal,
+               () => {},
+               {"on": inputData[1].label_on, "off": inputData[1].label_off}
+           )
+       };
+   },
    STRING(node, inputName, inputData, app) {
        const defaultVal = inputData[1].default || "";
        const multiline = !!inputData[1].multiline;
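In the frontend widget code, the new `isSlider` helper maps an input's `display` option to either a slider or a plain number widget, and the new `BOOLEAN` entry renders a toggle with optional `label_on`/`label_off` captions. A hedged Python-side sketch of a hypothetical custom node whose `INPUT_TYPES` would exercise these options (the node name and values are illustrative, not from the diff):

```python
# Hypothetical custom node using the input options these frontend changes read.
class ExampleSliderNode:
    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                # isSlider(inputData[1]["display"]) turns these into slider widgets.
                "strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0,
                                       "step": 0.01, "display": "slider"}),
                "steps": ("INT", {"default": 20, "min": 1, "max": 100,
                                  "display": "slider"}),
                # Rendered by the new BOOLEAN toggle widget with custom labels.
                "enabled": ("BOOLEAN", {"default": True,
                                        "label_on": "on", "label_off": "off"}),
            }
        }

    RETURN_TYPES = ("FLOAT",)
    FUNCTION = "run"
    CATEGORY = "example"

    def run(self, strength, steps, enabled):
        return (strength if enabled else 0.0,)
```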