Mirror of https://github.com/comfyanonymous/ComfyUI.git (synced 2025-12-17 01:52:59 +08:00)

Commit 47436c59d7: Merge branch 'master' into dr-support-pip-cm
@@ -3,10 +3,13 @@ https://www.amd.com/en/resources/support-articles/release-notes/RN-AMDGPU-WINDOW

HOW TO RUN:

-if you have a AMD gpu:
+If you have a AMD gpu:

run_amd_gpu.bat

+If you have memory issues you can try disabling the smart memory management by running comfyui with:
+
+run_amd_gpu_disable_smart_memory.bat

IF YOU GET A RED ERROR IN THE UI MAKE SURE YOU HAVE A MODEL/CHECKPOINT IN: ComfyUI\models\checkpoints
.ci/windows_amd_base_files/run_amd_gpu_disable_smart_memory.bat (new executable file, 2 lines)

@@ -0,0 +1,2 @@
+.\python_embeded\python.exe -s ComfyUI\main.py --windows-standalone-build --disable-smart-memory
+pause
.github/workflows/ruff.yml (vendored, 25 lines changed)

@@ -21,3 +21,28 @@ jobs:
    - name: Run Ruff
      run: ruff check .
+
+  pylint:
+    name: Run Pylint
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+
+    - name: Set up Python
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.12'
+
+    - name: Install requirements
+      run: |
+        python -m pip install --upgrade pip
+        pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
+        pip install -r requirements.txt
+
+    - name: Install Pylint
+      run: pip install pylint
+
+    - name: Run Pylint
+      run: pylint comfy_api_nodes
comfy_api_nodes/apis/__init__.py (generated, 1 line changed)

@@ -2,6 +2,7 @@
# filename: filtered-openapi.yaml
# timestamp: 2025-07-30T08:54:00+00:00
+# pylint: disable

from __future__ import annotations

from datetime import date, datetime
@@ -95,6 +95,7 @@ import aiohttp
import asyncio
import logging
import io
import os
import socket
from aiohttp.client_exceptions import ClientError, ClientResponseError
from typing import Dict, Type, Optional, Any, TypeVar, Generic, Callable, Tuple

@@ -499,7 +500,9 @@ class ApiClient:
        else:
            raise ValueError("File must be BytesIO or str path")

-       operation_id = f"upload_{upload_url.split('/')[-1]}_{uuid.uuid4().hex[:8]}"
+       parsed = urlparse(upload_url)
+       basename = os.path.basename(parsed.path) or parsed.netloc or "upload"
+       operation_id = f"upload_{basename}_{uuid.uuid4().hex[:8]}"
        request_logger.log_request_response(
            operation_id=operation_id,
            request_method="PUT",

@@ -532,7 +535,7 @@ class ApiClient:
                request_method="PUT",
                request_url=upload_url,
                response_status_code=e.status if hasattr(e, "status") else None,
-               response_headers=dict(e.headers) if getattr(e, "headers") else None,
+               response_headers=dict(e.headers) if hasattr(e, "headers") else None,
                response_content=None,
                error_message=f"{type(e).__name__}: {str(e)}",
            )
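A short sketch of the two fixes above (not part of the diff; the URL and exception class are invented): deriving the id from the parsed path keeps presigned-URL query strings out of it, and hasattr avoids the AttributeError that a bare getattr raises when the attribute is missing.

import os
import uuid
from urllib.parse import urlparse

upload_url = "https://bucket.example.com/assets/file.bin?X-Amz-Signature=abc"  # hypothetical presigned URL
parsed = urlparse(upload_url)
basename = os.path.basename(parsed.path) or parsed.netloc or "upload"  # "file.bin"; falls back to host, then "upload"
operation_id = f"upload_{basename}_{uuid.uuid4().hex[:8]}"              # e.g. "upload_file.bin_1a2b3c4d"
# The old upload_url.split('/')[-1] would have produced "file.bin?X-Amz-Signature=abc" here.

class FakeClientError(Exception):  # stand-in for an error object without a headers attribute
    pass

err = FakeClientError("boom")
print(hasattr(err, "headers"))     # False, safe to branch on
# getattr(err, "headers") with no default would raise AttributeError instead.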
@@ -4,16 +4,18 @@ import os
import datetime
import json
import logging
+import re
+import hashlib
from typing import Any

import folder_paths

# Get the logger instance
logger = logging.getLogger(__name__)


def get_log_directory():
-    """
-    Ensures the API log directory exists within ComfyUI's temp directory
-    and returns its path.
-    """
+    """Ensures the API log directory exists within ComfyUI's temp directory and returns its path."""
    base_temp_dir = folder_paths.get_temp_directory()
    log_dir = os.path.join(base_temp_dir, "api_logs")
    try:
@@ -24,42 +26,77 @@ def get_log_directory():
        return base_temp_dir
    return log_dir

-def _format_data_for_logging(data):
+
+def _sanitize_filename_component(name: str) -> str:
+    if not name:
+        return "log"
+    sanitized = re.sub(r"[^A-Za-z0-9._-]+", "_", name)  # Replace disallowed characters with underscore
+    sanitized = sanitized.strip(" ._")  # Windows: trailing dots or spaces are not allowed
+    if not sanitized:
+        sanitized = "log"
+    return sanitized
+
+
+def _short_hash(*parts: str, length: int = 10) -> str:
+    return hashlib.sha1(("|".join(parts)).encode("utf-8")).hexdigest()[:length]
+
+
+def _build_log_filepath(log_dir: str, operation_id: str, request_url: str) -> str:
+    """Build log filepath. We keep it well under common path length limits aiming for <= 240 characters total."""
+    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
+    slug = _sanitize_filename_component(operation_id)  # Best-effort human-readable slug from operation_id
+    h = _short_hash(operation_id or "", request_url or "")  # Short hash ties log to the full operation and URL
+
+    # Compute how much room we have for the slug given the directory length
+    # Keep total path length reasonably below ~260 on Windows.
+    max_total_path = 240
+    prefix = f"{timestamp}_"
+    suffix = f"_{h}.log"
+    if not slug:
+        slug = "op"
+    max_filename_len = max(60, max_total_path - len(log_dir) - 1)
+    max_slug_len = max(8, max_filename_len - len(prefix) - len(suffix))
+    if len(slug) > max_slug_len:
+        slug = slug[:max_slug_len].rstrip(" ._-")
+    return os.path.join(log_dir, f"{prefix}{slug}{suffix}")
+
+
+def _format_data_for_logging(data: Any) -> str:
    """Helper to format data (dict, str, bytes) for logging."""
    if isinstance(data, bytes):
        try:
-            return data.decode('utf-8')  # Try to decode as text
+            return data.decode("utf-8")  # Try to decode as text
        except UnicodeDecodeError:
            return f"[Binary data of length {len(data)} bytes]"
    elif isinstance(data, (dict, list)):
        try:
            return json.dumps(data, indent=2, ensure_ascii=False)
        except TypeError:
-            return str(data)  # Fallback for non-serializable objects
+            return str(data)  # Fallback for non-serializable objects
    return str(data)
def log_request_response(
    operation_id: str,
    request_method: str,
    request_url: str,
    request_headers: dict | None = None,
    request_params: dict | None = None,
-    request_data: any = None,
+    request_data: Any = None,
    response_status_code: int | None = None,
    response_headers: dict | None = None,
-    response_content: any = None,
-    error_message: str | None = None
+    response_content: Any = None,
+    error_message: str | None = None,
):
    """
    Logs API request and response details to a file in the temp/api_logs directory.
+    Filenames are sanitized and length-limited for cross-platform safety.
+    If we still fail to write, we fall back to appending into api.log.
    """
    log_dir = get_log_directory()
-    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
-    filename = f"{timestamp}_{operation_id.replace('/', '_').replace(':', '_')}.log"
-    filepath = os.path.join(log_dir, filename)
-
-    log_content = []
+    filepath = _build_log_filepath(log_dir, operation_id, request_url)
+
+    log_content: list[str] = []
    log_content.append(f"Timestamp: {datetime.datetime.now().isoformat()}")
    log_content.append(f"Operation ID: {operation_id}")
    log_content.append("-" * 30 + " REQUEST " + "-" * 30)
@@ -69,7 +106,7 @@ def log_request_response(
        log_content.append(f"Headers:\n{_format_data_for_logging(request_headers)}")
    if request_params:
        log_content.append(f"Params:\n{_format_data_for_logging(request_params)}")
-    if request_data:
+    if request_data is not None:
        log_content.append(f"Data/Body:\n{_format_data_for_logging(request_data)}")

    log_content.append("\n" + "-" * 30 + " RESPONSE " + "-" * 30)

@@ -77,7 +114,7 @@ def log_request_response(
        log_content.append(f"Status Code: {response_status_code}")
    if response_headers:
        log_content.append(f"Headers:\n{_format_data_for_logging(response_headers)}")
-    if response_content:
+    if response_content is not None:
        log_content.append(f"Content:\n{_format_data_for_logging(response_content)}")
    if error_message:
        log_content.append(f"Error:\n{error_message}")

@@ -89,6 +126,7 @@ def log_request_response(
    except Exception as e:
        logger.error(f"Error writing API log to {filepath}: {e}")


if __name__ == '__main__':
    # Example usage (for testing the logger directly)
    logger.setLevel(logging.DEBUG)
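A hedged usage sketch of the new filename scheme, assuming the helpers above are imported from the request logger module; all values below are invented:

log_dir = "C:/ComfyUI/temp/api_logs"
operation_id = "upload_file.bin?X-Amz-Signature=abcdef1234567890/extra"   # raw id with characters Windows rejects
path = _build_log_filepath(log_dir, operation_id, "https://bucket.example.com/assets/file.bin")
# The slug keeps only [A-Za-z0-9._-], so the result looks roughly like
#   C:/ComfyUI/temp/api_logs/20250730_085400_123456_upload_file.bin_X-Amz-Signature_abcdef1234567890_extra_ab12cd34ef.log
# The slug is truncated whenever the whole path would exceed the ~240 character budget,
# and the trailing 10-character hash keeps truncated names unique per operation and URL.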
@@ -52,7 +52,3 @@ class RodinResourceItem(BaseModel):

class Rodin3DDownloadResponse(BaseModel):
    list: List[RodinResourceItem] = Field(..., description="Source List")
-
-
-
-
@@ -1,24 +1,34 @@
import torch
import comfy.model_management
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io

from kornia.morphology import dilation, erosion, opening, closing, gradient, top_hat, bottom_hat
import kornia.color


-class Morphology:
+class Morphology(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"image": ("IMAGE",),
-                             "operation": (["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],),
-                             "kernel_size": ("INT", {"default": 3, "min": 3, "max": 999, "step": 1}),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="Morphology",
+            display_name="ImageMorphology",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Combo.Input(
+                    "operation",
+                    options=["erode", "dilate", "open", "close", "gradient", "bottom_hat", "top_hat"],
+                ),
+                io.Int.Input("kernel_size", default=3, min=3, max=999, step=1),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "process"
-
-    CATEGORY = "image/postprocessing"
-
-    def process(self, image, operation, kernel_size):
+    @classmethod
+    def execute(cls, image, operation, kernel_size) -> io.NodeOutput:
        device = comfy.model_management.get_torch_device()
        kernel = torch.ones(kernel_size, kernel_size, device=device)
        image_k = image.to(device).movedim(-1, 1)
@@ -39,49 +49,63 @@ class Morphology:
        else:
            raise ValueError(f"Invalid operation {operation} for morphology. Must be one of 'erode', 'dilate', 'open', 'close', 'gradient', 'tophat', 'bottomhat'")
        img_out = output.to(comfy.model_management.intermediate_device()).movedim(1, -1)
-        return (img_out,)
+        return io.NodeOutput(img_out)


-class ImageRGBToYUV:
+class ImageRGBToYUV(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "image": ("IMAGE",),
-                              }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageRGBToYUV",
+            category="image/batch",
+            inputs=[
+                io.Image.Input("image"),
+            ],
+            outputs=[
+                io.Image.Output(display_name="Y"),
+                io.Image.Output(display_name="U"),
+                io.Image.Output(display_name="V"),
+            ],
+        )

-    RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE")
-    RETURN_NAMES = ("Y", "U", "V")
-    FUNCTION = "execute"
-
-    CATEGORY = "image/batch"
-
-    def execute(self, image):
+    @classmethod
+    def execute(cls, image) -> io.NodeOutput:
        out = kornia.color.rgb_to_ycbcr(image.movedim(-1, 1)).movedim(1, -1)
-        return (out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image))
+        return io.NodeOutput(out[..., 0:1].expand_as(image), out[..., 1:2].expand_as(image), out[..., 2:3].expand_as(image))


-class ImageYUVToRGB:
+class ImageYUVToRGB(io.ComfyNode):
    @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {"Y": ("IMAGE",),
-                             "U": ("IMAGE",),
-                             "V": ("IMAGE",),
-                             }}
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageYUVToRGB",
+            category="image/batch",
+            inputs=[
+                io.Image.Input("Y"),
+                io.Image.Input("U"),
+                io.Image.Input("V"),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "execute"
-
-    CATEGORY = "image/batch"
-
-    def execute(self, Y, U, V):
+    @classmethod
+    def execute(cls, Y, U, V) -> io.NodeOutput:
        image = torch.cat([torch.mean(Y, dim=-1, keepdim=True), torch.mean(U, dim=-1, keepdim=True), torch.mean(V, dim=-1, keepdim=True)], dim=-1)
        out = kornia.color.ycbcr_to_rgb(image.movedim(-1, 1)).movedim(1, -1)
-        return (out,)
+        return io.NodeOutput(out)


-NODE_CLASS_MAPPINGS = {
-    "Morphology": Morphology,
-    "ImageRGBToYUV": ImageRGBToYUV,
-    "ImageYUVToRGB": ImageYUVToRGB,
-}
-
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "Morphology": "ImageMorphology",
-}
+class MorphologyExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Morphology,
+            ImageRGBToYUV,
+            ImageYUVToRGB,
+        ]
+
+
+async def comfy_entrypoint() -> MorphologyExtension:
+    return MorphologyExtension()
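For context, a minimal sketch of the tensor layout handling these nodes rely on (assumes kornia is installed; the dummy tensor is invented). ComfyUI IMAGE tensors are batched NHWC floats in [0, 1], while kornia's morphology ops expect NCHW, hence the movedim calls above:

import torch
from kornia.morphology import dilation

img = torch.rand(1, 64, 64, 3)               # NHWC, as ComfyUI passes images between nodes
kernel = torch.ones(3, 3)                    # structuring element, like torch.ones(kernel_size, kernel_size)
out = dilation(img.movedim(-1, 1), kernel)   # kornia operates on NCHW
out = out.movedim(1, -1)                     # back to NHWC before the node returns it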
@@ -1,96 +1,70 @@
-class Example:
+from typing_extensions import override
+
+from comfy_api.latest import ComfyExtension, io
+
+
+class Example(io.ComfyNode):
    """
-    A example node
+    An example node

    Class methods
    -------------
-    INPUT_TYPES (dict):
-        Tell the main program input parameters of nodes.
-    IS_CHANGED:
+    define_schema (io.Schema):
+        Tell the main program the metadata, input, output parameters of nodes.
+    fingerprint_inputs:
        optional method to control when the node is re executed.
    check_lazy_status:
        optional method to control list of input names that need to be evaluated.

    Attributes
    ----------
    RETURN_TYPES (`tuple`):
        The type of each element in the output tuple.
    RETURN_NAMES (`tuple`):
        Optional: The name of each output in the output tuple.
    FUNCTION (`str`):
        The name of the entry-point method. For example, if `FUNCTION = "execute"` then it will run Example().execute()
    OUTPUT_NODE ([`bool`]):
        If this node is an output node that outputs a result/image from the graph. The SaveImage node is an example.
        The backend iterates on these output nodes and tries to execute all their parents if their parent graph is properly connected.
        Assumed to be False if not present.
    CATEGORY (`str`):
        The category the node should appear in the UI.
    DEPRECATED (`bool`):
        Indicates whether the node is deprecated. Deprecated nodes are hidden by default in the UI, but remain
        functional in existing workflows that use them.
    EXPERIMENTAL (`bool`):
        Indicates whether the node is experimental. Experimental nodes are marked as such in the UI and may be subject to
        significant changes or removal in future versions. Use with caution in production workflows.
    execute(s) -> tuple || None:
        The entry point method. The name of this method must be the same as the value of property `FUNCTION`.
        For example, if `FUNCTION = "execute"` then this method's name must be `execute`, if `FUNCTION = "foo"` then it must be `foo`.
    """
    def __init__(self):
        pass

    @classmethod
-    def INPUT_TYPES(s):
+    def define_schema(cls) -> io.Schema:
        """
-        Return a dictionary which contains config for all input fields.
-        Some types (string): "MODEL", "VAE", "CLIP", "CONDITIONING", "LATENT", "IMAGE", "INT", "STRING", "FLOAT".
-        Input types "INT", "STRING" or "FLOAT" are special values for fields on the node.
-        The type can be a list for selection.
-
-        Returns: `dict`:
-            - Key input_fields_group (`string`): Can be either required, hidden or optional. A node class must have property `required`
-            - Value input_fields (`dict`): Contains input fields config:
-                * Key field_name (`string`): Name of a entry-point method's argument
-                * Value field_config (`tuple`):
-                    + First value is a string indicate the type of field or a list for selection.
-                    + Second value is a config for type "INT", "STRING" or "FLOAT".
+        Return a schema which contains all information about the node.
+        Some types: "Model", "Vae", "Clip", "Conditioning", "Latent", "Image", "Int", "String", "Float", "Combo".
+        For outputs the "io.Model.Output" should be used, for inputs the "io.Model.Input" can be used.
+        The type can be a "Combo" - this will be a list for selection.
        """
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "int_field": ("INT", {
-                    "default": 0,
-                    "min": 0, #Minimum value
-                    "max": 4096, #Maximum value
-                    "step": 64, #Slider's step
-                    "display": "number", # Cosmetic only: display as "number" or "slider"
-                    "lazy": True # Will only be evaluated if check_lazy_status requires it
-                }),
-                "float_field": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.0,
-                    "max": 10.0,
-                    "step": 0.01,
-                    "round": 0.001, #The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding.
-                    "display": "number",
-                    "lazy": True
-                }),
-                "print_to_screen": (["enable", "disable"],),
-                "string_field": ("STRING", {
-                    "multiline": False, #True if you want the field to look like the one on the ClipTextEncode node
-                    "default": "Hello World!",
-                    "lazy": True
-                }),
-            },
-        }
+        return io.Schema(
+            node_id="Example",
+            display_name="Example Node",
+            category="Example",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input(
+                    "int_field",
+                    min=0,
+                    max=4096,
+                    step=64,  # Slider's step
+                    display_mode=io.NumberDisplay.number,  # Cosmetic only: display as "number" or "slider"
+                    lazy=True,  # Will only be evaluated if check_lazy_status requires it
+                ),
+                io.Float.Input(
+                    "float_field",
+                    default=1.0,
+                    min=0.0,
+                    max=10.0,
+                    step=0.01,
+                    round=0.001,  # The value representing the precision to round to, will be set to the step value by default. Can be set to False to disable rounding.
+                    display_mode=io.NumberDisplay.number,
+                    lazy=True,
+                ),
+                io.Combo.Input("print_to_screen", options=["enable", "disable"]),
+                io.String.Input(
+                    "string_field",
+                    multiline=False,  # True if you want the field to look like the one on the ClipTextEncode node
+                    default="Hello world!",
+                    lazy=True,
+                )
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

-    RETURN_TYPES = ("IMAGE",)
-    #RETURN_NAMES = ("image_output_name",)
-
-    FUNCTION = "test"
-
-    #OUTPUT_NODE = False
-
-    CATEGORY = "Example"
-
-    def check_lazy_status(self, image, string_field, int_field, float_field, print_to_screen):
+    @classmethod
+    def check_lazy_status(cls, image, string_field, int_field, float_field, print_to_screen):
        """
        Return a list of input names that need to be evaluated.
@@ -107,7 +81,8 @@ class Example:
        else:
            return []

-    def test(self, image, string_field, int_field, float_field, print_to_screen):
+    @classmethod
+    def execute(cls, image, string_field, int_field, float_field, print_to_screen) -> io.NodeOutput:
        if print_to_screen == "enable":
            print(f"""Your input contains:
                string_field aka input text: {string_field}

@@ -116,7 +91,7 @@ class Example:
            """)
        #do some processing on the image, in this example I just invert it
        image = 1.0 - image
-        return (image,)
+        return io.NodeOutput(image)

    """
        The node will always be re executed if any of the inputs change but

@@ -127,7 +102,7 @@ class Example:
        changes between executions the LoadImage node is executed again.
    """
    #@classmethod
-    #def IS_CHANGED(s, image, string_field, int_field, float_field, print_to_screen):
+    #def fingerprint_inputs(s, image, string_field, int_field, float_field, print_to_screen):
    #    return ""

# Set the web directory, any .js file in that directory will be loaded by the frontend as a frontend extension
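A hedged sketch of how the commented-out stub above could be filled in (hypothetical, not part of the example file): fingerprint_inputs returns a value, and the node re-executes only when that value differs from the previous run, mirroring how LoadImage re-executes when its image hash changes.

class Example(io.ComfyNode):
    # ... define_schema, check_lazy_status and execute as in the diff above ...
    @classmethod
    def fingerprint_inputs(cls, image, string_field, int_field, float_field, print_to_screen):
        import hashlib
        # Re-run the node only when the image tensor's bytes change, not on every queue.
        return hashlib.sha256(image.cpu().numpy().tobytes()).hexdigest()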
@@ -143,13 +118,13 @@ async def get_hello(request):
    return web.json_response("hello")


-# A dictionary that contains all nodes you want to export with their names
-# NOTE: names should be globally unique
-NODE_CLASS_MAPPINGS = {
-    "Example": Example
-}
+class ExampleExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Example,
+        ]

-# A dictionary that contains the friendly/humanly readable titles for the nodes
-NODE_DISPLAY_NAME_MAPPINGS = {
-    "Example": "Example Node"
-}
+
+async def comfy_entrypoint() -> ExampleExtension:  # ComfyUI calls this to load your extension and its nodes.
+    return ExampleExtension()
main.py (1 line changed)

@@ -141,6 +141,7 @@ if os.name == "nt":
    os.environ['MIMALLOC_PURGE_DELAY'] = '0'

if __name__ == "__main__":
+    os.environ['TORCH_ROCM_AOTRITON_ENABLE_EXPERIMENTAL'] = '1'
    if args.default_device is not None:
        default_dev = args.default_device
        devices = list(range(32))
@@ -22,3 +22,57 @@ lint.select = [
    "F",
]
exclude = ["*.ipynb", "**/generated/*.pyi"]

+[tool.pylint]
+master.py-version = "3.9"
+master.extension-pkg-allow-list = [
+    "pydantic",
+]
+reports.output-format = "colorized"
+similarities.ignore-imports = "yes"
+messages_control.disable = [
+    "missing-module-docstring",
+    "missing-class-docstring",
+    "missing-function-docstring",
+    "line-too-long",
+    "too-few-public-methods",
+    "too-many-public-methods",
+    "too-many-instance-attributes",
+    "too-many-positional-arguments",
+    "broad-exception-raised",
+    "too-many-lines",
+    "invalid-name",
+    "unused-argument",
+    "broad-exception-caught",
+    "consider-using-with",
+    "fixme",
+    "too-many-statements",
+    "too-many-branches",
+    "too-many-locals",
+    "too-many-arguments",
+    "duplicate-code",
+    "abstract-method",
+    "superfluous-parens",
+    "arguments-differ",
+    "redefined-builtin",
+    "unnecessary-lambda",
+    "dangerous-default-value",
+    # next warnings should be fixed in future
+    "bad-classmethod-argument",  # Class method should have 'cls' as first argument
+    "wrong-import-order",  # Standard imports should be placed before third party imports
+    "logging-fstring-interpolation",  # Use lazy % formatting in logging functions
+    "ungrouped-imports",
+    "unnecessary-pass",
+    "unidiomatic-typecheck",
+    "unnecessary-lambda-assignment",
+    "bad-indentation",
+    "no-else-return",
+    "no-else-raise",
+    "invalid-overridden-method",
+    "unused-variable",
+    "pointless-string-statement",
+    "inconsistent-return-statements",
+    "import-outside-toplevel",
+    "reimported",
+    "redefined-outer-name",
+]