mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-02-10 21:42:37 +08:00

Merge branch 'comfyanonymous:master' into refactor/execution

commit 859f2f5115
README.md
````diff
@@ -93,8 +93,8 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins
 ```pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/rocm5.4.2```
 
-This is the command to install the nightly with ROCm 5.5 that supports the 7000 series and might have some performance improvements:
-```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.5 -r requirements.txt```
+This is the command to install the nightly with ROCm 5.6 that supports the 7000 series and might have some performance improvements:
+```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm5.6 -r requirements.txt```
 
 ### NVIDIA
````
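A quick way to confirm which build ended up active (a minimal sketch; ROCm builds of PyTorch report AMD GPUs through the regular CUDA device API, and their version string carries a `+rocm` suffix):

```python
import torch

# Nightly ROCm wheels carry a "+rocm5.6"-style suffix in the version string.
print(torch.__version__)

# ROCm builds expose AMD GPUs through the CUDA device API,
# so this should print True on a working install.
print(torch.cuda.is_available())
```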
comfy/lora.py
```diff
@@ -170,6 +170,8 @@ def model_lora_keys_clip(model, key_map={}):
             if k in sdk:
                 lora_key = text_model_lora_key.format(b, LORA_CLIP_MAP[c])
                 key_map[lora_key] = k
+                lora_key = "lora_te1_text_model_encoder_layers_{}_{}".format(b, LORA_CLIP_MAP[c])
+                key_map[lora_key] = k
 
             k = "clip_l.transformer.text_model.encoder.layers.{}.{}.weight".format(b, c)
             if k in sdk:
```
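The two added lines register a second alias for each CLIP layer: LoRA files that use the `lora_te1_...` naming (common for two-encoder SDXL checkpoints) now resolve to the same model weight as the older `lora_te_...` spelling. A toy illustration of the resulting many-to-one shape of `key_map` (hypothetical key strings, for illustration only):

```python
key_map = {}
model_key = "clip_l.transformer.text_model.encoder.layers.0.self_attn.q_proj.weight"

# Older single-encoder spelling.
key_map["lora_te_text_model_encoder_layers_0_self_attn_q_proj"] = model_key
# Newer te1-prefixed spelling used by some SDXL LoRAs.
key_map["lora_te1_text_model_encoder_layers_0_self_attn_q_proj"] = model_key

# Whichever spelling a LoRA file uses, the lookup lands on the same weight.
assert len(set(key_map.values())) == 1
```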
comfy/supported_models.py
```diff
@@ -126,7 +126,8 @@ class SDXLRefiner(supported_models_base.BASE):
     def process_clip_state_dict_for_saving(self, state_dict):
         replace_prefix = {}
         state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
-        state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
+        if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g:
+            state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
         replace_prefix["clip_g"] = "conditioner.embedders.0.model"
         state_dict_g = supported_models_base.state_dict_prefix_replace(state_dict_g, replace_prefix)
         return state_dict_g
```
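This hunk, and the identical one in `SDXL` below, replaces an unconditional `pop` with a guarded one: `dict.pop(key)` raises `KeyError` when the key is absent, which happens for converted checkpoints whose text encoder state dict ships without `position_ids`. A short sketch of the failure mode and two equivalent fixes:

```python
state_dict_g = {"clip_g.some.weight": 0}  # no position_ids entry
key = "clip_g.transformer.text_model.embeddings.position_ids"

# The old unconditional form raises KeyError on such a dict:
# state_dict_g.pop(key)

# The guarded form from the patch:
if key in state_dict_g:
    state_dict_g.pop(key)

# An equivalent one-liner: pop with a default never raises.
state_dict_g.pop(key, None)
```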
```diff
@@ -171,7 +172,8 @@ class SDXL(supported_models_base.BASE):
         replace_prefix = {}
         keys_to_replace = {}
         state_dict_g = diffusers_convert.convert_text_enc_state_dict_v20(state_dict, "clip_g")
-        state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
+        if "clip_g.transformer.text_model.embeddings.position_ids" in state_dict_g:
+            state_dict_g.pop("clip_g.transformer.text_model.embeddings.position_ids")
         for k in state_dict:
             if k.startswith("clip_l"):
                 state_dict_g[k] = state_dict[k]
```
comfy_extras/nodes_model_merging.py
```diff
@@ -1,5 +1,7 @@
 import comfy.sd
 import comfy.utils
+import comfy.model_base
+
 import folder_paths
 import json
 import os
```
```diff
@@ -100,6 +102,31 @@ class CheckpointSave:
         prompt_info = json.dumps(prompt)
 
         metadata = {"prompt": prompt_info}
 
+        enable_modelspec = True
+        if isinstance(model.model, comfy.model_base.SDXL):
+            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-base"
+        elif isinstance(model.model, comfy.model_base.SDXLRefiner):
+            metadata["modelspec.architecture"] = "stable-diffusion-xl-v1-refiner"
+        else:
+            enable_modelspec = False
+
+        if enable_modelspec:
+            metadata["modelspec.sai_model_spec"] = "1.0.0"
+            metadata["modelspec.implementation"] = "sgm"
+            metadata["modelspec.title"] = "{} {}".format(filename, counter)
+
+        #TODO:
+        # "stable-diffusion-v1", "stable-diffusion-v1-inpainting", "stable-diffusion-v2-512",
+        # "stable-diffusion-v2-768-v", "stable-diffusion-v2-unclip-l", "stable-diffusion-v2-unclip-h",
+        # "v2-inpainting"
+
+        if model.model.model_type == comfy.model_base.ModelType.EPS:
+            metadata["modelspec.predict_key"] = "epsilon"
+        elif model.model.model_type == comfy.model_base.ModelType.V_PREDICTION:
+            metadata["modelspec.predict_key"] = "v"
+
         if extra_pnginfo is not None:
             for x in extra_pnginfo:
                 metadata[x] = json.dumps(extra_pnginfo[x])
```
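The metadata dict ends up in the safetensors file header, which only accepts string values; that is why the prompt and every `extra_pnginfo` entry go through `json.dumps` first. A minimal sketch of the save step, calling `safetensors.torch.save_file` directly rather than ComfyUI's own save helper:

```python
import torch
from safetensors.torch import save_file

# Dummy tensor standing in for real checkpoint weights.
tensors = {"model.diffusion_model.dummy.weight": torch.zeros(4)}

metadata = {
    "modelspec.sai_model_spec": "1.0.0",
    "modelspec.architecture": "stable-diffusion-xl-v1-base",
    "modelspec.implementation": "sgm",
    "modelspec.predict_key": "epsilon",
}

# safetensors stores metadata as a flat str -> str map in the header.
save_file(tensors, "checkpoint.safetensors", metadata=metadata)
```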
cuda_malloc.py
```diff
@@ -37,7 +37,11 @@ def get_gpu_names():
         return set()
 
 def cuda_malloc_supported():
-    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M", "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745"}
+    blacklist = {"GeForce GTX TITAN X", "GeForce GTX 980", "GeForce GTX 970", "GeForce GTX 960", "GeForce GTX 950", "GeForce 945M",
+                 "GeForce 940M", "GeForce 930M", "GeForce 920M", "GeForce 910M", "GeForce GTX 750", "GeForce GTX 745", "Quadro K620",
+                 "Quadro K1200", "Quadro K2200", "Quadro M500", "Quadro M520", "Quadro M600", "Quadro M620", "Quadro M1000",
+                 "Quadro M1200", "Quadro M2000", "Quadro M2200", "Quadro M3000", "Quadro M4000", "Quadro M5000", "Quadro M5500", "Quadro M6000"}
+
     try:
         names = get_gpu_names()
     except:
```
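The expanded blacklist adds Kepler/Maxwell-era Quadro cards alongside the GeForce models already listed; these older GPUs do not work reliably with the cudaMallocAsync allocator this check gates. The surrounding function matches reported device names by substring, roughly like this sketch (assuming `get_gpu_names()` yields strings such as `"NVIDIA GeForce GTX 970"`):

```python
def cuda_malloc_supported_sketch(names, blacklist):
    # Any reported device containing a blacklisted model name
    # means the async allocator cannot be enabled.
    for name in names:
        if any(bad in name for bad in blacklist):
            return False
    return True

print(cuda_malloc_supported_sketch({"NVIDIA GeForce GTX 970"},
                                   {"GeForce GTX 970"}))  # -> False
```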
web/types/comfy.d.ts (vendored)
```diff
@@ -30,9 +30,7 @@ export interface ComfyExtension {
 	getCustomWidgets(
 		app: ComfyApp
 	): Promise<
-		Array<
-			Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
-		>
+		Record<string, (node, inputName, inputData, app) => { widget?: IWidget; minWidth?: number; minHeight?: number }>
 	>;
 	/**
 	 * Allows the extension to add additional handling to the node before it is registered with LGraph
```