mirror of
https://github.com/comfyanonymous/ComfyUI.git
synced 2026-04-17 22:12:30 +08:00
feat: add Phase 4 Chunk 4 review nodes (EvidencePackAssemble, ResponseDraft, ToneControl)
This commit is contained in:
parent
810756ecd6
commit
3473712bf9
@ -24,7 +24,10 @@ class ResearchExtension(ComfyExtension):
|
|||||||
from custom_nodes.research.review_map import ReviewMap
|
from custom_nodes.research.review_map import ReviewMap
|
||||||
from custom_nodes.research.evidence_gap_detect import EvidenceGapDetect
|
from custom_nodes.research.evidence_gap_detect import EvidenceGapDetect
|
||||||
from custom_nodes.research.action_route import ActionRoute
|
from custom_nodes.research.action_route import ActionRoute
|
||||||
return [PaperSearch, PaperClaimExtract, ClaimEvidenceAssemble, StyleProfileExtract, ReferencePaperSelect, SectionPlan, AbstractDraft, IntroductionDraft, MethodsDraft, ConsistencyCheck, ExportManuscript, ReviewImport, ReviewAtomize, ReviewClassify, ReviewMap, EvidenceGapDetect, ActionRoute]
|
from custom_nodes.research.evidence_pack_assemble import EvidencePackAssemble
|
||||||
|
from custom_nodes.research.response_draft import ResponseDraft
|
||||||
|
from custom_nodes.research.tone_control import ToneControl
|
||||||
|
return [PaperSearch, PaperClaimExtract, ClaimEvidenceAssemble, StyleProfileExtract, ReferencePaperSelect, SectionPlan, AbstractDraft, IntroductionDraft, MethodsDraft, ConsistencyCheck, ExportManuscript, ReviewImport, ReviewAtomize, ReviewClassify, ReviewMap, EvidenceGapDetect, ActionRoute, EvidencePackAssemble, ResponseDraft, ToneControl]
|
||||||
|
|
||||||
|
|
||||||
async def comfy_entrypoint() -> ComfyExtension:
|
async def comfy_entrypoint() -> ComfyExtension:
|
||||||
|
|||||||
95
custom_nodes/research/evidence_pack_assemble.py
Normal file
95
custom_nodes/research/evidence_pack_assemble.py
Normal file
@ -0,0 +1,95 @@
|
|||||||
|
"""EvidencePackAssemble node - assemble evidence pack from claims and gaps."""
|
||||||
|
import json
|
||||||
|
from typing_extensions import override
|
||||||
|
from comfy_api.latest import ComfyNode, io
|
||||||
|
|
||||||
|
|
||||||
|
class EvidencePackAssemble(io.ComfyNode):
    """Assemble evidence pack from claims and gap report.

    For every gap listed in the gap report, gathers the claims whose text
    appears (case-insensitively) in the gap's description and the papers
    whose title or abstract contain one of those claims, then bundles
    everything into a JSON "evidence pack" with a per-gap-type summary.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        """Declare three multiline JSON string inputs and one JSON string output."""
        return io.Schema(
            node_id="EvidencePackAssemble",
            display_name="Assemble Evidence Pack",
            category="Research",
            inputs=[
                io.String.Input(
                    "gap_report",
                    display_name="Gap Report (JSON)",
                    default="{}",
                    multiline=True,
                ),
                io.String.Input(
                    "claims_json",
                    display_name="Claims (JSON)",
                    default="[]",
                    multiline=True,
                ),
                io.String.Input(
                    "papers_json",
                    display_name="Papers (JSON)",
                    default="[]",
                    multiline=True,
                ),
            ],
            outputs=[
                io.String.Output(display_name="Evidence Pack (JSON)"),
            ],
        )

    @classmethod
    def execute(cls, gap_report: str, claims_json: str, papers_json: str) -> io.NodeOutput:
        """Build the evidence pack.

        Malformed or empty JSON inputs degrade to empty data rather than
        failing the workflow (best-effort node semantics).
        """
        try:
            gap_data = json.loads(gap_report) if gap_report else {}
            claims = json.loads(claims_json) if claims_json else []
            papers = json.loads(papers_json) if papers_json else []
        except json.JSONDecodeError:
            # Deliberate best-effort fallback: unparseable input is treated
            # as "no data", matching the other review nodes in this set.
            gap_data = {}
            claims = []
            papers = []

        gaps = gap_data.get("gaps", [])
        evidence_pack = {"packs": [], "summary": {"total": 0, "by_gap": {}}}

        for gap in gaps:
            gap_id = gap.get("gap_id", f"gap_{len(evidence_pack['packs']) + 1}")
            gap_type = gap.get("gap_type", "")
            item_id = gap.get("item_id", "")

            description_lower = gap.get("description", "").lower()
            # A claim is "related" when its text appears verbatim
            # (case-insensitively) in the gap description.  The original
            # wrapped this test in a pointless `any(... for _ in [1])`;
            # the plain condition is equivalent.
            # NOTE(review): a claim with empty claim_text matches every gap
            # ("" is a substring of everything) — confirm this is intended.
            related_claims = [
                c for c in claims
                if c.get("claim_text", "").lower() in description_lower
            ]

            # Papers whose abstract or title mention a related claim's text.
            # NOTE(review): a paper matching several claims is appended once
            # per match (duplicates possible before the [:2] cut) — confirm.
            related_papers = []
            for claim in related_claims:
                claim_text_lower = claim.get("claim_text", "").lower()
                for paper in papers:
                    abstract = paper.get("abstract", "").lower()
                    title = paper.get("title", "").lower()
                    if claim_text_lower in abstract or claim_text_lower in title:
                        related_papers.append(paper)

            pack = {
                "gap_id": gap_id,
                "item_id": item_id,
                "gap_type": gap_type,
                "severity": gap.get("severity", 1),
                "description": gap.get("description", ""),
                # Cap payload size: top 3 claims, top 2 papers.
                "related_claims": related_claims[:3],
                "related_papers": related_papers[:2],
                # Two or more supporting claims counts as "strong" evidence.
                "evidence_strength": "strong" if len(related_claims) >= 2 else "weak",
            }

            evidence_pack["packs"].append(pack)
            # Tally packs per gap type (untyped gaps fall under the "" key).
            # The original built the key via a no-op f-string (f"{gap_type}").
            by_gap = evidence_pack["summary"]["by_gap"]
            by_gap[gap_type] = by_gap.get(gap_type, 0) + 1

        evidence_pack["summary"]["total"] = len(evidence_pack["packs"])
        return io.NodeOutput(evidence_pack=json.dumps(evidence_pack, indent=2))
|
||||||
103
custom_nodes/research/response_draft.py
Normal file
103
custom_nodes/research/response_draft.py
Normal file
@ -0,0 +1,103 @@
|
|||||||
|
"""ResponseDraft node - draft rebuttal/response text."""
|
||||||
|
import json
|
||||||
|
from typing_extensions import override
|
||||||
|
from comfy_api.latest import ComfyNode, io
|
||||||
|
|
||||||
|
|
||||||
|
class ResponseDraft(io.ComfyNode):
    """Draft response text for review items.

    Turns each routed action (experiment / citation / revise / other) into a
    templated rebuttal sentence, optionally citing the top related claim from
    the matching evidence pack.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        """Declare three multiline JSON string inputs and one JSON string output."""
        return io.Schema(
            node_id="ResponseDraft",
            display_name="Draft Response",
            category="Research",
            inputs=[
                io.String.Input(
                    "action_routes",
                    display_name="Action Routes (JSON)",
                    default="{}",
                    multiline=True,
                ),
                io.String.Input(
                    "evidence_pack",
                    display_name="Evidence Pack (JSON)",
                    default="{}",
                    multiline=True,
                ),
                io.String.Input(
                    "original_claims",
                    display_name="Original Claims (JSON)",
                    default="[]",
                    multiline=True,
                ),
            ],
            outputs=[
                io.String.Output(display_name="Drafted Responses (JSON)"),
            ],
        )

    @classmethod
    def execute(cls, action_routes: str, evidence_pack: str, original_claims: str) -> io.NodeOutput:
        """Draft one response per action route.

        Malformed JSON inputs degrade to empty data (best-effort semantics).
        """
        try:
            routes_data = json.loads(action_routes) if action_routes else {}
            evidence = json.loads(evidence_pack) if evidence_pack else {}
            # NOTE(review): original_claims is parsed but never consumed
            # below — kept for interface compatibility; confirm whether the
            # drafting logic was meant to use it.
            claims = json.loads(original_claims) if original_claims else []
        except json.JSONDecodeError:
            routes_data = {}
            evidence = {}
            claims = []

        routes = routes_data.get("routes", [])
        packs = evidence.get("packs", [])

        responses = []

        for route in routes:
            gap_id = route.get("gap_id", "")
            action_type = route.get("action_type", "respond")
            description = route.get("description", "")

            # Evidence pack produced for the same gap, if any.
            matching_pack = next((p for p in packs if p.get("gap_id") == gap_id), None)

            if action_type == "experiment":
                response_text = (
                    f"We acknowledge the concern regarding {description}. "
                    f"We will conduct additional experiments as suggested "
                    f"and include the results in the revised manuscript."
                )
            elif action_type == "citation":
                response_text = (
                    f"We thank the reviewer for pointing out this gap. "
                    f"We will add relevant citations to address this concern."
                )
            elif action_type == "revise":
                claims_text = ""
                if matching_pack and matching_pack.get("related_claims"):
                    top_claim = matching_pack["related_claims"][0].get("claim_text", "")
                    claims_text = f"Our claim that '{top_claim[:50]}...' is supported by..."
                # Join only the non-empty sentences with single spaces.
                # (The original template emitted double spaces: claims_text
                # carried a leading space and the f-string added its own,
                # and an empty claims_text still left two spaces behind.)
                sentences = [
                    "We appreciate the feedback.",
                    claims_text,
                    "We will revise the manuscript to strengthen this argument.",
                ]
                response_text = " ".join(s for s in sentences if s)
            else:
                response_text = (
                    f"We thank the reviewer for this comment. {description} "
                    f"We will address this concern in the revised manuscript."
                )

            responses.append({
                "gap_id": gap_id,
                "action_type": action_type,
                "response_text": response_text,
                "status": "drafted",
            })

        result = {
            "total_responses": len(responses),
            "responses": responses,
        }

        return io.NodeOutput(drafted_responses=json.dumps(result, indent=2))
|
||||||
94
custom_nodes/research/tone_control.py
Normal file
94
custom_nodes/research/tone_control.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
"""ToneControl node - adjust tone of response text."""
|
||||||
|
import json
|
||||||
|
from typing_extensions import override
|
||||||
|
from comfy_api.latest import ComfyNode, io
|
||||||
|
|
||||||
|
|
||||||
|
class ToneControl(io.ComfyNode):
    """Adjust tone of drafted responses.

    Rewrites each drafted response according to the requested tone
    (apologetic / professional / confident / diplomatic); any unrecognized
    tone falls back to the professional treatment.
    """

    @classmethod
    def define_schema(cls) -> io.Schema:
        """Declare the drafted-responses JSON input, a tone string, and one JSON output."""
        return io.Schema(
            node_id="ToneControl",
            display_name="Tone Control",
            category="Research",
            inputs=[
                io.String.Input(
                    "drafted_responses",
                    display_name="Drafted Responses (JSON)",
                    default="{}",
                    multiline=True,
                ),
                io.String.Input(
                    "tone",
                    display_name="Target Tone",
                    default="professional",
                ),
            ],
            outputs=[
                io.String.Output(display_name="Finalized Responses (JSON)"),
            ],
        )

    @classmethod
    def execute(cls, drafted_responses: str, tone: str) -> io.NodeOutput:
        """Apply the tone transformation to every drafted response.

        Malformed JSON input degrades to an empty response list.
        """
        try:
            data = json.loads(drafted_responses) if drafted_responses else {}
        except json.JSONDecodeError:
            data = {}

        responses = data.get("responses", [])

        # Hoisted: the original recomputed tone.lower() on every lookup
        # and branch test.
        tone_key = tone.lower()

        tone_prefixes = {
            "apologetic": "We sincerely apologize for...",
            "professional": "We thank the reviewer for",
            "confident": "We stand by our",
            "diplomatic": "We appreciate the feedback and will",
        }

        tone_suffixes = {
            "apologetic": "We are committed to improving this.",
            "professional": "We will address this in the revision.",
            "confident": "This is well-supported by our results.",
            "diplomatic": "We will carefully consider this suggestion.",
        }

        prefix = tone_prefixes.get(tone_key, "We thank the reviewer for")
        suffix = tone_suffixes.get(tone_key, "We will address this in the revision.")

        finalized = []

        for resp in responses:
            text = resp.get("response_text", "")

            if tone_key == "apologetic":
                # NOTE(review): this lowercases the entire draft, including
                # proper nouns — confirm that is intended.
                text = f"{prefix} {text.lower()}"
                text = text.rstrip(".") + f". {suffix}"
            elif tone_key == "confident":
                # str.replace is a no-op when the substring is absent, so the
                # original's case-insensitive `in` guards around these
                # case-sensitive replaces were redundant; dropping them does
                # not change any output.
                text = text.replace("acknowledge", "note")
                text = text.replace("will revise", "have revised")
            elif tone_key == "diplomatic":
                text = text.replace("We will", "We would be happy to")
            else:  # professional (and any unrecognized tone)
                if not text.startswith("We "):
                    text = f"{prefix} {text}"

            finalized.append({
                **resp,
                "response_text": text,
                "tone_applied": tone,
                "status": "finalized",
            })

        result = {
            "total": len(finalized),
            "tone": tone,
            "responses": finalized,
        }

        return io.NodeOutput(finalized_responses=json.dumps(result, indent=2))
|
||||||
Loading…
Reference in New Issue
Block a user