mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-01-30 16:20:17 +08:00
Compare commits
8 Commits
6e91c0490d
...
8df681b4a2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8df681b4a2 | ||
|
|
1a72bf2046 | ||
|
|
ccfd4088d0 | ||
|
|
9e2f5e41b7 | ||
|
|
5e20b843a5 | ||
|
|
277e46473d | ||
|
|
06c322ad2d | ||
|
|
1d0b1b9d64 |
@ -108,7 +108,7 @@ See what ComfyUI can do with the [example workflows](https://comfyanonymous.gith
|
||||
- [LCM models and Loras](https://comfyanonymous.github.io/ComfyUI_examples/lcm/)
|
||||
- Latent previews with [TAESD](#how-to-show-high-quality-previews)
|
||||
- Works fully offline: core will never download anything unless you want to.
|
||||
- Optional API nodes to use paid models from external providers through the online [Comfy API](https://docs.comfy.org/tutorials/api-nodes/overview).
|
||||
- Optional API nodes to use paid models from external providers through the online [Comfy API](https://docs.comfy.org/tutorials/api-nodes/overview) (disable with `--disable-api-nodes`).
|
||||
- [Config file](extra_model_paths.yaml.example) to set the search paths for models.
|
||||
|
||||
Workflow examples can be found on the [Examples page](https://comfyanonymous.github.io/ComfyUI_examples/)
|
||||
@ -212,7 +212,7 @@ Python 3.14 works but you may encounter issues with the torch compile node. The
|
||||
|
||||
Python 3.13 is very well supported. If you have trouble with some custom node dependencies on 3.13 you can try 3.12
|
||||
|
||||
torch 2.4 and above is supported but some features might only work on newer versions. We generally recommend using the latest major version of pytorch with the latest cuda version unless it is less than 2 weeks old.
|
||||
torch 2.4 and above is supported but some features and optimizations might only work on newer versions. We generally recommend using the latest major version of pytorch with the latest cuda version unless it is less than 2 weeks old.
|
||||
|
||||
### Instructions:
|
||||
|
||||
@ -229,7 +229,7 @@ AMD users can install rocm and pytorch with pip if you don't have it already ins
|
||||
|
||||
```pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.4```
|
||||
|
||||
This is the command to install the nightly with ROCm 7.0 which might have some performance improvements:
|
||||
This is the command to install the nightly with ROCm 7.1 which might have some performance improvements:
|
||||
|
||||
```pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/rocm7.1```
|
||||
|
||||
|
||||
@ -216,6 +216,12 @@ parser.add_argument(
|
||||
help="The local filesystem path to the directory where the frontend is located. Overrides --front-end-version.",
|
||||
)
|
||||
|
||||
# Opt-in compatibility switch: when set, node input schemas are down-converted
# from the V3 format (COMBO objects) to the legacy V1 tuple format before
# being served, so older frontends can still parse them
# (see convert_v3_to_v1_schema in server.py).
parser.add_argument(
    "--legacy-frontend-compat",
    action="store_true",
    help="Convert V3 node schemas to V1 format for compatibility with old frontends"
)
|
||||
|
||||
parser.add_argument("--user-directory", type=is_valid_directory, default=None, help="Set the ComfyUI user directory with an absolute path. Overrides --base-directory.")
|
||||
|
||||
parser.add_argument("--enable-compress-response-body", action="store_true", help="Enable compressing response body.")
|
||||
|
||||
29
server.py
29
server.py
@ -191,6 +191,33 @@ def create_block_external_middleware():
|
||||
|
||||
return block_external_middleware
|
||||
|
||||
def convert_v3_to_v1_schema(input_types):
    """
    Convert V3 input schemas to the V1 shape expected by old frontends.

    A V3 combo spec looks like ``('COMBO', {'options': [...], **extras})``;
    the V1 equivalent is ``([...], {**extras})``. Anything that does not
    match that shape is passed through unchanged.

    Args:
        input_types: Mapping of input category (e.g. 'required', 'optional')
            to a mapping of input name -> input spec tuple.

    Returns:
        A new dict of the same shape with COMBO specs rewritten.
        The ``input_types`` argument is NOT mutated.
    """
    converted = {}

    for category, inputs in input_types.items():
        converted[category] = {}

        for input_name, input_spec in inputs.items():

            if isinstance(input_spec, tuple) and len(input_spec) > 1 and input_spec[0] == 'COMBO':
                first_elem = input_spec[1]
                if isinstance(first_elem, dict) and 'options' in first_elem:
                    # Build a fresh extras dict instead of pop()-ing 'options'
                    # out of the shared spec dict: mutating first_elem in place
                    # corrupts the schema for every other consumer of the same
                    # node_info data (the original code did exactly that).
                    extras = {k: v for k, v in first_elem.items() if k != 'options'}
                    converted[category][input_name] = (first_elem['options'], extras)
                else:
                    # Already V1 format, pass through
                    converted[category][input_name] = input_spec
            else:
                # Pass through anything else unchanged
                converted[category][input_name] = input_spec

    return converted
|
||||
|
||||
class PromptServer():
|
||||
def __init__(self, loop):
|
||||
@ -695,6 +722,8 @@ class PromptServer():
|
||||
for x in nodes.NODE_CLASS_MAPPINGS:
|
||||
try:
|
||||
out[x] = node_info(x)
|
||||
if args.legacy_frontend_compat:
|
||||
out[x]['input'] = convert_v3_to_v1_schema(out[x]['input'])
|
||||
except Exception:
|
||||
logging.error(f"[ERROR] An error occurred while retrieving information for the '{x}' node.")
|
||||
logging.error(traceback.format_exc())
|
||||
|
||||
Loading…
Reference in New Issue
Block a user