mirror of https://github.com/comfyanonymous/ComfyUI.git
synced 2026-04-17 14:02:38 +08:00

Merge upstream/master, keep local README.md

commit dbd2d27d6e
.github/PULL_REQUEST_TEMPLATE/api-node.md (vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
+<!-- API_NODE_PR_CHECKLIST: do not remove -->
+
+## API Node PR Checklist
+
+### Scope
+- [ ] **Is API Node Change**
+
+### Pricing & Billing
+- [ ] **Need pricing update**
+- [ ] **No pricing update**
+
+If **Need pricing update**:
+- [ ] Metronome rate cards updated
+- [ ] Auto‑billing tests updated and passing
+
+### QA
+- [ ] **QA done**
+- [ ] **QA not required**
+
+### Comms
+- [ ] Informed **@Kosinkadink**
.github/workflows/api-node-template.yml (vendored, new file, 58 lines)

@@ -0,0 +1,58 @@
+name: Append API Node PR template
+
+on:
+  pull_request_target:
+    types: [opened, reopened, synchronize, edited, ready_for_review]
+    paths:
+      - 'comfy_api_nodes/**' # only run if these files changed
+
+permissions:
+  contents: read
+  pull-requests: write
+
+jobs:
+  inject:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Ensure template exists and append to PR body
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const { owner, repo } = context.repo;
+            const number = context.payload.pull_request.number;
+            const templatePath = '.github/PULL_REQUEST_TEMPLATE/api-node.md';
+            const marker = '<!-- API_NODE_PR_CHECKLIST: do not remove -->';
+
+            const { data: pr } = await github.rest.pulls.get({ owner, repo, pull_number: number });
+
+            let templateText;
+            try {
+              const res = await github.rest.repos.getContent({
+                owner,
+                repo,
+                path: templatePath,
+                ref: pr.base.ref
+              });
+              const buf = Buffer.from(res.data.content, res.data.encoding || 'base64');
+              templateText = buf.toString('utf8');
+            } catch (e) {
+              core.setFailed(`Required PR template not found at "${templatePath}" on ${pr.base.ref}. Please add it to the repo.`);
+              return;
+            }
+
+            // Enforce the presence of the marker inside the template (for idempotence)
+            if (!templateText.includes(marker)) {
+              core.setFailed(`Template at "${templatePath}" does not contain the required marker:\n${marker}\nAdd it so we can detect duplicates safely.`);
+              return;
+            }
+
+            // If the PR already contains the marker, do not append again.
+            const body = pr.body || '';
+            if (body.includes(marker)) {
+              core.info('Template already present in PR body; nothing to inject.');
+              return;
+            }
+
+            const newBody = (body ? body + '\n\n' : '') + templateText + '\n';
+            await github.rest.pulls.update({ owner, repo, pull_number: number, body: newBody });
+            core.notice('API Node template appended to PR description.');
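The workflow's safety hinges on the fixed HTML-comment marker: the job refuses to run if the template lacks it, and its presence in the PR body makes re-runs no-ops. A minimal Python sketch of that append-once logic (the helper name and inputs are illustrative, not part of the workflow):

```python
# Append-once sketch, reduced to plain Python for clarity. Hypothetical
# helper, not part of the workflow above.
MARKER = "<!-- API_NODE_PR_CHECKLIST: do not remove -->"

def append_template_once(body, template):
    """Return the updated PR body, or None when no update is needed."""
    if MARKER not in template:
        raise ValueError("template is missing the idempotence marker")
    body = body or ""
    if MARKER in body:
        return None  # marker already present: a re-run must not duplicate it
    return (body + "\n\n" if body else "") + template + "\n"

template = MARKER + "\n## API Node PR Checklist"
once = append_template_once("My PR description", template)
assert once is not None
assert append_template_once(once, template) is None  # second run is a no-op
```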
@@ -167,39 +167,55 @@ class DoubleStreamBlock(nn.Module):
         img_modulated = self.img_norm1(img)
         img_modulated = apply_mod(img_modulated, (1 + img_mod1.scale), img_mod1.shift, modulation_dims_img)
         img_qkv = self.img_attn.qkv(img_modulated)
+        del img_modulated
         img_q, img_k, img_v = img_qkv.view(img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        del img_qkv
         img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)
 
         # prepare txt for attention
         txt_modulated = self.txt_norm1(txt)
         txt_modulated = apply_mod(txt_modulated, (1 + txt_mod1.scale), txt_mod1.shift, modulation_dims_txt)
         txt_qkv = self.txt_attn.qkv(txt_modulated)
+        del txt_modulated
         txt_q, txt_k, txt_v = txt_qkv.view(txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        del txt_qkv
         txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)
 
         if self.flipped_img_txt:
+            q = torch.cat((img_q, txt_q), dim=2)
+            del img_q, txt_q
+            k = torch.cat((img_k, txt_k), dim=2)
+            del img_k, txt_k
+            v = torch.cat((img_v, txt_v), dim=2)
+            del img_v, txt_v
             # run actual attention
-            attn = attention(torch.cat((img_q, txt_q), dim=2),
-                             torch.cat((img_k, txt_k), dim=2),
-                             torch.cat((img_v, txt_v), dim=2),
+            attn = attention(q, k, v,
                              pe=pe, mask=attn_mask, transformer_options=transformer_options)
+            del q, k, v
 
             img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1]:]
         else:
+            q = torch.cat((txt_q, img_q), dim=2)
+            del txt_q, img_q
+            k = torch.cat((txt_k, img_k), dim=2)
+            del txt_k, img_k
+            v = torch.cat((txt_v, img_v), dim=2)
+            del txt_v, img_v
             # run actual attention
-            attn = attention(torch.cat((txt_q, img_q), dim=2),
-                             torch.cat((txt_k, img_k), dim=2),
-                             torch.cat((txt_v, img_v), dim=2),
+            attn = attention(q, k, v,
                              pe=pe, mask=attn_mask, transformer_options=transformer_options)
+            del q, k, v
 
             txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:]
 
         # calculate the img bloks
         img += apply_mod(self.img_attn.proj(img_attn), img_mod1.gate, None, modulation_dims_img)
+        del img_attn
         img += apply_mod(self.img_mlp(apply_mod(self.img_norm2(img), (1 + img_mod2.scale), img_mod2.shift, modulation_dims_img)), img_mod2.gate, None, modulation_dims_img)
 
         # calculate the txt bloks
         txt += apply_mod(self.txt_attn.proj(txt_attn), txt_mod1.gate, None, modulation_dims_txt)
+        del txt_attn
         txt += apply_mod(self.txt_mlp(apply_mod(self.txt_norm2(txt), (1 + txt_mod2.scale), txt_mod2.shift, modulation_dims_txt)), txt_mod2.gate, None, modulation_dims_txt)
 
         if txt.dtype == torch.float16:
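Functionally this hunk is a no-op; every change is memory scheduling. Each intermediate (`img_modulated`, the qkv projections, the pre-concat q/k/v) is `del`ed as soon as it is consumed, so its buffer returns to PyTorch's caching allocator before the next large allocation. A toy illustration of the effect on peak VRAM (assumes a CUDA device; not ComfyUI code):

```python
import torch

def peak_mb(fn):
    # Measure peak allocator usage across one call.
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated() / 1e6

def without_del():
    a = torch.randn(4096, 4096, device="cuda")  # ~64 MB float32
    b = a * 2   # `a` is still referenced, so a and b coexist
    c = b + 1   # ...and briefly a, b, c: peak ~192 MB
    return c

def with_del():
    a = torch.randn(4096, 4096, device="cuda")
    b = a * 2
    del a       # a's buffer is reusable before the next allocation
    c = b + 1   # peak ~128 MB
    del b
    return c

if torch.cuda.is_available():
    print(peak_mb(without_del), peak_mb(with_del))  # first > second
```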
@@ -249,12 +265,15 @@ class SingleStreamBlock(nn.Module):
         qkv, mlp = torch.split(self.linear1(apply_mod(self.pre_norm(x), (1 + mod.scale), mod.shift, modulation_dims)), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)
 
         q, k, v = qkv.view(qkv.shape[0], qkv.shape[1], 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        del qkv
         q, k = self.norm(q, k, v)
 
         # compute attention
         attn = attention(q, k, v, pe=pe, mask=attn_mask, transformer_options=transformer_options)
+        del q, k, v
         # compute activation in mlp stream, cat again and run second linear layer
-        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
+        mlp = self.mlp_act(mlp)
+        output = self.linear2(torch.cat((attn, mlp), 2))
         x += apply_mod(output, mod.gate, None, modulation_dims)
         if x.dtype == torch.float16:
             x = torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504)
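Rebinding `mlp = self.mlp_act(mlp)` before the `cat` is the same trick: the pre-activation tensor loses its last reference before the concatenated output is allocated. The retained float16 guard at the end clamps to 65504, which is exactly the largest finite half-precision value; a standalone check (illustrative only):

```python
import torch

# 65504 is the float16 saturation point used by the nan_to_num guard above.
assert torch.finfo(torch.float16).max == 65504.0

x = torch.tensor([float("inf"), float("-inf"), float("nan")], dtype=torch.float16)
print(torch.nan_to_num(x, nan=0.0, posinf=65504, neginf=-65504))
# tensor([ 65504., -65504.,      0.], dtype=torch.float16)
```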
@@ -2,6 +2,7 @@ import os
 import sys
 import asyncio
 import traceback
+import time
 
 import nodes
 import folder_paths
@@ -733,6 +734,7 @@ class PromptServer():
             for sensitive_val in execution.SENSITIVE_EXTRA_DATA_KEYS:
                 if sensitive_val in extra_data:
                     sensitive[sensitive_val] = extra_data.pop(sensitive_val)
+            extra_data["create_time"] = int(time.time() * 1000) # timestamp in milliseconds
             self.prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute, sensitive))
             response = {"prompt_id": prompt_id, "number": number, "node_errors": valid[3]}
             return web.json_response(response)
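With this change every queued prompt's `extra_data` carries its enqueue time as epoch milliseconds. A consumer could use it to measure queue latency; a small sketch (the helper below is hypothetical, not server.py API):

```python
import time

def queue_wait_ms(extra_data):
    """Milliseconds a prompt has waited since the server enqueued it."""
    created = extra_data.get("create_time")  # epoch millis, set by the server
    if created is None:
        return None  # prompt queued before this change; field absent
    return time.time() * 1000 - created

# Example: an item stamped now has waited ~0 ms.
extra = {"create_time": int(time.time() * 1000)}
print(f"waited ~{queue_wait_ms(extra):.0f} ms")
```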