Mirror of https://github.com/comfyanonymous/ComfyUI.git
commit 08a991eab8: Added base64
parent 43a31b3f59
comfy_runpod.py

@@ -6,6 +6,10 @@ import uuid
import json
import urllib.request
import urllib.parse
from PIL import Image
import base64
import io


import runpod
@@ -62,12 +66,20 @@ def get_images(ws, prompt):

def run_prompt(job):
    prompt_text = job["input"]["prompt"]
    print("prompt type:", type(prompt_text))  #debug: confirm what RunPod delivered

    prompt = prompt_text
    ws = websocket.WebSocket()
    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
    images = get_images(ws, prompt)

    data = {'images': []}
    for node_id in images:
        for image_data in images[node_id]:
            image = Image.open(io.BytesIO(image_data))
            im_file = io.BytesIO()
            image.save(im_file, format="JPEG")
            im_bytes = im_file.getvalue()  #the image re-encoded as JPEG bytes
            im_b64 = base64.b64encode(im_bytes).decode('utf-8')  #decode so the dict stays JSON-serializable
            data['images'].append(im_b64)
    return data


runpod.serverless.start({"handler": run_prompt})
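run_prompt now returns base64 strings instead of raw bytes, so the response survives JSON serialization. A minimal client-side sketch of turning that payload back into files; the function name and filename pattern are illustrative only, not part of this commit:

import base64

def save_images(data, prefix="output"):
    #data is the {"images": [<base64 str>, ...]} dict built by run_prompt
    for i, im_b64 in enumerate(data["images"]):
        with open("{}_{}.jpg".format(prefix, i), "wb") as f:
            f.write(base64.b64decode(im_b64))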
31  my_script_examples/basic_api_example.py  Normal file

@@ -0,0 +1,31 @@
import json
from urllib import request

#This is the ComfyUI API prompt format. If you want it for a specific workflow you can copy it from the prompt section
#of the image metadata of images generated with ComfyUI.
#Keep in mind ComfyUI is pre-alpha software, so this format will change a bit.

#This one is a ControlNet workflow (not the stock default workflow).
prompt_text = """
{"3": {"inputs": {"seed": 160913364876129, "steps": 16, "cfg": 6.0, "sampler_name": "uni_pc", "scheduler": "normal", "denoise": 1.0, "model": ["14", 0], "positive": ["10", 0], "negative": ["7", 0], "latent_image": ["5", 0]}, "class_type": "KSampler"}, "5": {"inputs": {"width": 512, "height": 512, "batch_size": 1}, "class_type": "EmptyLatentImage"}, "6": {"inputs": {"text": "(solo) girl (flat chest:0.9), (fennec ears:1.1)\u00a0 (fox ears:1.1), (blonde hair:1.0), messy hair, sky clouds, standing in a grass field, (chibi), blue eyes", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "7": {"inputs": {"text": "(hands), text, error, cropped, (worst quality:1.2), (low quality:1.2), normal quality, (jpeg artifacts:1.3), signature, watermark, username, blurry, artist name, monochrome, sketch, censorship, censor, (copyright:1.2), extra legs, (forehead mark) (depth of field) (emotionless) (penis)", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "8": {"inputs": {"samples": ["3", 0], "vae": ["13", 0]}, "class_type": "VAEDecode"}, "9": {"inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]}, "class_type": "SaveImage"}, "10": {"inputs": {"strength": 0.8999999999999999, "conditioning": ["6", 0], "control_net": ["12", 0], "image": ["11", 0]}, "class_type": "ControlNetApply"}, "11": {"inputs": {"image": "ComfyUI_00005_ (1).png", "choose file to upload": "image"}, "class_type": "LoadImage", "is_changed": ["7657ee164339745ea7b5300e55a7655f0404fbb5a0a61d990748027a19e2f178"]}, "12": {"inputs": {"control_net_name": "control_v11p_sd15_canny_fp16.safetensors"}, "class_type": "ControlNetLoader"}, "13": {"inputs": {"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"}, "class_type": "VAELoader"}, "14": {"inputs": {"ckpt_name": "sd-v1-4.ckpt"}, "class_type": "CheckpointLoaderSimple"}}
"""

def queue_prompt(prompt):
    p = {"prompt": prompt}
    data = json.dumps(p).encode('utf-8')
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    request.urlopen(req)

prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode node
prompt["6"]["inputs"]["text"] = "masterpiece best quality man"

#set the seed for our KSampler node
prompt["3"]["inputs"]["seed"] = 5

queue_prompt(prompt)
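queue_prompt above discards the HTTP response, but the /prompt endpoint replies with JSON containing a prompt_id, which the websocket examples later in this commit rely on. A minimal variant that captures it, assuming the same local server:

import json
from urllib import request

def queue_prompt_with_id(prompt):
    #Same POST as queue_prompt above, but parse the JSON reply,
    #which carries the prompt_id used by the /history endpoint.
    data = json.dumps({"prompt": prompt}).encode('utf-8')
    req = request.Request("http://127.0.0.1:8188/prompt", data=data)
    return json.loads(request.urlopen(req).read())['prompt_id']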
366  my_script_examples/def.json  Normal file

@@ -0,0 +1,366 @@
{
  "last_node_id": 9,
  "last_link_id": 9,
  "nodes": [
    {
      "id": 7,
      "class_type": "CLIPTextEncode",
      "pos": [413, 389],
      "size": {"0": 425.27801513671875, "1": 180.6060791015625},
      "flags": {},
      "order": 3,
      "mode": 0,
      "inputs": [{"name": "clip", "type": "CLIP", "link": 5}],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [6], "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["text, watermark"]
    },
    {
      "id": 6,
      "class_type": "CLIPTextEncode",
      "pos": [415, 186],
      "size": {"0": 422.84503173828125, "1": 164.31304931640625},
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [{"name": "clip", "type": "CLIP", "link": 3}],
      "outputs": [{"name": "CONDITIONING", "type": "CONDITIONING", "links": [4], "slot_index": 0}],
      "properties": {"Node name for S&R": "CLIPTextEncode"},
      "widgets_values": ["beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"]
    },
    {
      "id": 5,
      "class_type": "EmptyLatentImage",
      "pos": [473, 609],
      "size": {"0": 315, "1": 106},
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [2], "slot_index": 0}],
      "properties": {"Node name for S&R": "EmptyLatentImage"},
      "widgets_values": [512, 512, 1]
    },
    {
      "id": 3,
      "class_type": "KSampler",
      "pos": [863, 186],
      "size": {"0": 315, "1": 262},
      "flags": {},
      "order": 4,
      "mode": 0,
      "inputs": [
        {"name": "model", "type": "MODEL", "link": 1},
        {"name": "positive", "type": "CONDITIONING", "link": 4},
        {"name": "negative", "type": "CONDITIONING", "link": 6},
        {"name": "latent_image", "type": "LATENT", "link": 2}
      ],
      "outputs": [{"name": "LATENT", "type": "LATENT", "links": [7], "slot_index": 0}],
      "properties": {"Node name for S&R": "KSampler"},
      "widgets_values": [156680208700286, "randomize", 20, 8, "euler", "normal", 1]
    },
    {
      "id": 8,
      "class_type": "VAEDecode",
      "pos": [1209, 188],
      "size": {"0": 210, "1": 46},
      "flags": {},
      "order": 5,
      "mode": 0,
      "inputs": [
        {"name": "samples", "type": "LATENT", "link": 7},
        {"name": "vae", "type": "VAE", "link": 8}
      ],
      "outputs": [{"name": "IMAGE", "type": "IMAGE", "links": [9], "slot_index": 0}],
      "properties": {"Node name for S&R": "VAEDecode"}
    },
    {
      "id": 9,
      "class_type": "SaveImage",
      "pos": [1451, 189],
      "size": {"0": 210, "1": 58},
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [{"name": "images", "type": "IMAGE", "link": 9}],
      "properties": {},
      "widgets_values": ["ComfyUI"]
    },
    {
      "id": 4,
      "class_type": "CheckpointLoaderSimple",
      "pos": [26, 474],
      "size": {"0": 315, "1": 98},
      "flags": {},
      "order": 1,
      "mode": 0,
      "outputs": [
        {"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
        {"name": "CLIP", "type": "CLIP", "links": [3, 5], "slot_index": 1},
        {"name": "VAE", "type": "VAE", "links": [8], "slot_index": 2}
      ],
      "properties": {"Node name for S&R": "CheckpointLoaderSimple"},
      "widgets_values": ["sd-v1-4.ckpt"]
    }
  ],
  "links": [
    [1, 4, 0, 3, 0, "MODEL"],
    [2, 5, 0, 3, 3, "LATENT"],
    [3, 4, 1, 6, 0, "CLIP"],
    [4, 6, 0, 3, 1, "CONDITIONING"],
    [5, 4, 1, 7, 0, "CLIP"],
    [6, 7, 0, 3, 2, "CONDITIONING"],
    [7, 3, 0, 8, 0, "LATENT"],
    [8, 4, 2, 8, 1, "VAE"],
    [9, 8, 0, 9, 0, "IMAGE"]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
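def.json stores the graph in ComfyUI's UI save format (nodes with positions plus typed links), which is not the flat, class_type-keyed prompt format the /prompt endpoint expects. A small sketch, assuming this file path, that walks the saved graph to show the structure:

import json

with open("my_script_examples/def.json") as f:
    workflow = json.load(f)

#Each node carries an id and a class_type; each link is
#[link_id, src_node, src_slot, dst_node, dst_slot, type].
for node in workflow["nodes"]:
    print(node["id"], node["class_type"])
for link_id, src, src_slot, dst, dst_slot, ltype in workflow["links"]:
    print("link {}: node {}[{}] -> node {}[{}] ({})".format(link_id, src, src_slot, dst, dst_slot, ltype))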
1  my_script_examples/error.json  Normal file

@@ -0,0 +1 @@
{"delayTime":18267,"error":"{\n \"error_type\": \"<class 'ConnectionRefusedError'>\",\n \"error_message\": \"[Errno 111] Connection refused\",\n \"error_traceback\": \"Traceback (most recent call last):\\n File \\\"/usr/local/lib/python3.8/dist-packages/runpod/serverless/modules/job.py\\\", line 82, in run_job\\n job_output = handler(job)\\n File \\\"comfy_runpod.py\\\", line 69, in run_prompt\\n ws.connect(\\\"ws://{}/ws?clientId={}\\\".format(server_address, client_id))\\n File \\\"/usr/local/lib/python3.8/dist-packages/websocket/_core.py\\\", line 249, in connect\\n self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),\\n File \\\"/usr/local/lib/python3.8/dist-packages/websocket/_http.py\\\", line 130, in connect\\n sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)\\n File \\\"/usr/local/lib/python3.8/dist-packages/websocket/_http.py\\\", line 205, in _open_socket\\n raise err\\n File \\\"/usr/local/lib/python3.8/dist-packages/websocket/_http.py\\\", line 185, in _open_socket\\n sock.connect(address)\\nConnectionRefusedError: [Errno 111] Connection refused\\n\",\n \"host_name\": \"i23pqmpyc929gv-64410b8b\",\n \"pod_id\": \"i23pqmpyc929gv\"\n}","executionTime":800,"id":"sync-bf064733-5017-441f-819b-94110a097541","status":"FAILED"}
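error.json records a failed request: run_prompt raised ConnectionRefusedError at ws.connect because the ComfyUI server inside the worker was not yet listening on 127.0.0.1:8188 when the job arrived. One guard, as a hedged sketch (the host, port, and timeout values are assumptions), is to poll the port before connecting:

import socket
import time

def wait_for_server(host="127.0.0.1", port=8188, timeout=60.0):
    #Poll until ComfyUI accepts TCP connections, or give up after `timeout` seconds.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=2.0):
                return True
        except OSError:
            time.sleep(0.5)
    raise RuntimeError("ComfyUI did not come up on {}:{}".format(host, port))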
73  my_script_examples/runpod_websocket.py  Normal file

@@ -0,0 +1,73 @@
#This is an example that uses the websockets api to know when a prompt execution is done
#Once the prompt execution is done it downloads the images using the /history endpoint

import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse

import runpod

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

def queue_prompt(prompt):
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    return json.loads(urllib.request.urlopen(req).read())

def get_image(filename, subfolder, folder_type):
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
        return response.read()

def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())

def get_images(ws, prompt):
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break #Execution is done
        else:
            continue #previews are binary data

    history = get_history(prompt_id)[prompt_id]
    for node_id in history['outputs']:
        node_output = history['outputs'][node_id]
        if 'images' in node_output:
            images_output = []
            for image in node_output['images']:
                image_data = get_image(image['filename'], image['subfolder'], image['type'])
                images_output.append(image_data)
            output_images[node_id] = images_output

    return output_images

def run_prompt(job):
    prompt_text = job["input"]["prompt"]  #RunPod delivers the request payload under job["input"]

    prompt = json.loads(prompt_text)
    ws = websocket.WebSocket()
    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
    images = get_images(ws, prompt)
    return images  #raw image bytes are not JSON-serializable; comfy_runpod.py base64-encodes them instead

runpod.serverless.start({"handler": run_prompt})
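Once deployed, this handler is invoked through RunPod's HTTP API rather than called directly. A hypothetical client sketch follows; ENDPOINT_ID and API_KEY are placeholders, and the /runsync URL shape follows RunPod's serverless convention, so verify it against the current RunPod docs:

import json
import urllib.request

ENDPOINT_ID = "your-endpoint-id"  #placeholder, not part of this commit
API_KEY = "your-api-key"          #placeholder, not part of this commit

#working.json (added in this commit) holds a known-good API prompt string.
payload = {"input": {"prompt": open("my_script_examples/working.json").read()}}
req = urllib.request.Request(
    "https://api.runpod.ai/v2/{}/runsync".format(ENDPOINT_ID),
    data=json.dumps(payload).encode('utf-8'),
    headers={"Authorization": "Bearer " + API_KEY, "Content-Type": "application/json"},
)
print(json.loads(urllib.request.urlopen(req).read()))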
94  my_script_examples/websockets_api_example.py  Normal file

@@ -0,0 +1,94 @@
#This is an example that uses the websockets api to know when a prompt execution is done
#Once the prompt execution is done it downloads the images using the /history endpoint

import websocket #NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
import uuid
import json
import urllib.request
import urllib.parse

server_address = "127.0.0.1:8188"
client_id = str(uuid.uuid4())

def queue_prompt(prompt):
    p = {"prompt": prompt, "client_id": client_id}
    data = json.dumps(p).encode('utf-8')
    req = urllib.request.Request("http://{}/prompt".format(server_address), data=data)
    return json.loads(urllib.request.urlopen(req).read())

def get_image(filename, subfolder, folder_type):
    data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
    url_values = urllib.parse.urlencode(data)
    with urllib.request.urlopen("http://{}/view?{}".format(server_address, url_values)) as response:
        return response.read()

def get_history(prompt_id):
    with urllib.request.urlopen("http://{}/history/{}".format(server_address, prompt_id)) as response:
        return json.loads(response.read())

def get_images(ws, prompt):
    prompt_id = queue_prompt(prompt)['prompt_id']
    output_images = {}
    while True:
        out = ws.recv()
        if isinstance(out, str):
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break #Execution is done
        else:
            continue #previews are binary data

    history = get_history(prompt_id)[prompt_id]
    for node_id in history['outputs']:
        node_output = history['outputs'][node_id]
        if 'images' in node_output:
            images_output = []
            for image in node_output['images']:
                image_data = get_image(image['filename'], image['subfolder'], image['type'])
                images_output.append(image_data)
            output_images[node_id] = images_output

    return output_images

# prompt_text = """
# {"3": {"inputs": {"seed": 160913364876129, "steps": 16, "cfg": 6.0, "sampler_name": "uni_pc", "scheduler": "normal", "denoise": 1.0, "model": ["14", 0], "positive": ["10", 0], "negative": ["7", 0], "latent_image": ["5", 0]}, "class_type": "KSampler"}, "5": {"inputs": {"width": 512, "height": 512, "batch_size": 1}, "class_type": "EmptyLatentImage"}, "6": {"inputs": {"text": "(solo) girl (flat chest:0.9), (fennec ears:1.1)\u00a0 (fox ears:1.1), (blonde hair:1.0), messy hair, sky clouds, standing in a grass field, (chibi), blue eyes", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "7": {"inputs": {"text": "(hands), text, error, cropped, (worst quality:1.2), (low quality:1.2), normal quality, (jpeg artifacts:1.3), signature, watermark, username, blurry, artist name, monochrome, sketch, censorship, censor, (copyright:1.2), extra legs, (forehead mark) (depth of field) (emotionless) (penis)", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "8": {"inputs": {"samples": ["3", 0], "vae": ["13", 0]}, "class_type": "VAEDecode"}, "9": {"inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]}, "class_type": "SaveImage"}, "10": {"inputs": {"strength": 0.8999999999999999, "conditioning": ["6", 0], "control_net": ["12", 0], "image": ["11", 0]}, "class_type": "ControlNetApply"}, "11": {"inputs": {"image": "ComfyUI_00005_ (1).png", "choose file to upload": "image"}, "class_type": "LoadImage", "is_changed": ["7657ee164339745ea7b5300e55a7655f0404fbb5a0a61d990748027a19e2f178"]}, "12": {"inputs": {"control_net_name": "control_v11p_sd15_canny_fp16.safetensors"}, "class_type": "ControlNetLoader"}, "13": {"inputs": {"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"}, "class_type": "VAELoader"}, "14": {"inputs": {"ckpt_name": "sd-v1-4.ckpt"}, "class_type": "CheckpointLoaderSimple"}}
# """

prompt_text = """
{"3": {"inputs": {"seed": 156680208700286, "steps": 20, "cfg": 8.0, "sampler_name": "euler", "scheduler": "normal", "denoise": 1.0, "model": ["4", 0], "positive": ["6", 0], "negative": ["7", 0], "latent_image": ["5", 0]}, "class_type": "KSampler"}, "4": {"inputs": {"ckpt_name": "sd-v1-4.ckpt"}, "class_type": "CheckpointLoaderSimple"}, "5": {"inputs": {"width": 512, "height": 512, "batch_size": 1}, "class_type": "EmptyLatentImage"}, "6": {"inputs": {"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,", "clip": ["4", 1]}, "class_type": "CLIPTextEncode"}, "7": {"inputs": {"text": "text, watermark", "clip": ["4", 1]}, "class_type": "CLIPTextEncode"}, "8": {"inputs": {"samples": ["3", 0], "vae": ["4", 2]}, "class_type": "VAEDecode"}, "9": {"inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]}, "class_type": "SaveImage"}}
"""

prompt = json.loads(prompt_text)
#set the text prompt for our positive CLIPTextEncode
# prompt["6"]["inputs"]["text"] = "a girl in anime style, cute"

# prompt["11"]["inputs"]["image"] = "D:/Downloads/IMG_7150.jpeg"

# #for sd 2.1
# prompt["12"]["inputs"]["control_net_name"] = "controlnetFurususSD21_21Canny.safetensors"
# prompt["14"]["inputs"]["ckpt_name"] = "v2-1_512-ema-pruned.ckpt"

# #set the seed for our KSampler node
# prompt["3"]["inputs"]["seed"] = 5

ws = websocket.WebSocket()
ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
images = get_images(ws, prompt)

#Display the output images:
from PIL import Image
import io
for node_id in images:
    for image_data in images[node_id]:
        image = Image.open(io.BytesIO(image_data))
        image.show()
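image.show() opens one viewer window per image, which is awkward on a headless worker. As an alternative sketch (the output directory and naming scheme here are arbitrary choices, not part of the original script), the raw bytes from /view can be written straight to disk:

import os

os.makedirs("outputs", exist_ok=True)
for node_id in images:
    for idx, image_data in enumerate(images[node_id]):
        with open(os.path.join("outputs", "{}_{}.png".format(node_id, idx)), "wb") as f:
            f.write(image_data)  #raw bytes from the /view endpoint (PNG by default)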
1  my_script_examples/working.json  Normal file

@@ -0,0 +1 @@
{"3": {"inputs": {"seed": 160913364876129, "steps": 16, "cfg": 6.0, "sampler_name": "uni_pc", "scheduler": "normal", "denoise": 1.0, "model": ["14", 0], "positive": ["10", 0], "negative": ["7", 0], "latent_image": ["5", 0]}, "class_type": "KSampler"}, "5": {"inputs": {"width": 512, "height": 512, "batch_size": 1}, "class_type": "EmptyLatentImage"}, "6": {"inputs": {"text": "(solo) girl (flat chest:0.9), (fennec ears:1.1)\u00a0 (fox ears:1.1), (blonde hair:1.0), messy hair, sky clouds, standing in a grass field, (chibi), blue eyes", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "7": {"inputs": {"text": "(hands), text, error, cropped, (worst quality:1.2), (low quality:1.2), normal quality, (jpeg artifacts:1.3), signature, watermark, username, blurry, artist name, monochrome, sketch, censorship, censor, (copyright:1.2), extra legs, (forehead mark) (depth of field) (emotionless) (penis)", "clip": ["14", 1]}, "class_type": "CLIPTextEncode"}, "8": {"inputs": {"samples": ["3", 0], "vae": ["13", 0]}, "class_type": "VAEDecode"}, "9": {"inputs": {"filename_prefix": "ComfyUI", "images": ["8", 0]}, "class_type": "SaveImage"}, "10": {"inputs": {"strength": 0.8999999999999999, "conditioning": ["6", 0], "control_net": ["12", 0], "image": ["11", 0]}, "class_type": "ControlNetApply"}, "11": {"inputs": {"image": "ComfyUI_00005_ (1).png", "choose file to upload": "image"}, "class_type": "LoadImage", "is_changed": ["7657ee164339745ea7b5300e55a7655f0404fbb5a0a61d990748027a19e2f178"]}, "12": {"inputs": {"control_net_name": "control_v11p_sd15_canny_fp16.safetensors"}, "class_type": "ControlNetLoader"}, "13": {"inputs": {"vae_name": "vae-ft-mse-840000-ema-pruned.safetensors"}, "class_type": "VAELoader"}, "14": {"inputs": {"ckpt_name": "sd-v1-4.ckpt"}, "class_type": "CheckpointLoaderSimple"}}