# NOTE(review): removed web-scrape header artifacts (file-size line, git blob
# hashes, and a flattened line-number gutter) that made this file invalid Python.
import gradio as gr
import yaml, pandas as pd, os, re
PLAYBOOK_PATH = "playbook.yaml"
CEM_PATH = "cem.csv"
def load_playbook():
    """Load the playbook YAML from disk; fall back to a minimal default.

    Returns the parsed mapping, or a skeleton playbook structure when the
    file does not exist, and ``{}`` when the file parses to nothing.
    """
    if os.path.exists(PLAYBOOK_PATH):
        with open(PLAYBOOK_PATH, "r", encoding="utf-8") as fh:
            return yaml.safe_load(fh) or {}
    return {"playbook": {"metadata": {"name": "ACE–CPT Playbook", "version": "0.1"}, "items": []}}
def save_playbook(pb):
    """Serialize *pb* to PLAYBOOK_PATH as YAML; always returns True."""
    with open(PLAYBOOK_PATH, "w", encoding="utf-8") as out:
        # Keep insertion order and non-ASCII characters (em-dashes, arrows).
        yaml.safe_dump(pb, out, sort_keys=False, allow_unicode=True)
    return True
def ensure_cem():
    """Return the Claim–Evidence Matrix as a DataFrame, creating it if absent."""
    header = ["ClaimID", "Claim", "EvidenceRefs", "Counter-Evidence", "Confidence"]
    if not os.path.exists(CEM_PATH):
        # Seed an empty CSV so the read below always succeeds.
        pd.DataFrame(columns=header).to_csv(CEM_PATH, index=False)
    return pd.read_csv(CEM_PATH)
def save_cem(df):
    """Persist *df* to CEM_PATH and hand it back so the UI can re-render it."""
    df.to_csv(CEM_PATH, index=False)
    return df
def run_generator(task, inputs, playbook_text):
    """Stubbed Generator stage of the ACE loop.

    Parameters
    ----------
    task : str
        Task description; only its first 120 characters appear in the trace.
    inputs, playbook_text : str
        Accepted for interface parity with a real model call; unused here.

    Returns
    -------
    tuple[str, str, str, str]
        (answer, newline-joined trace, used playbook item ids, CPT notes).
    """
    steps = (
        f"S01: Restate task → {task[:120]}",
        "S02: Identify relevant playbook items → PB-001, PB-004",
        "S03: Apply heuristic reasoning (stub)",
        "S04: Prepare answer with uncertainty note",
    )
    reply = "[Demo] Task addressed. (Replace with real model call.)"
    items_used = "PB-001, PB-004"
    notes = "2.11–2.17 quick pass; Flags: anthropomorphization=false, spec_gaming=possible, uncertainty=high"
    return reply, "\n".join(steps), items_used, notes
def run_reflector(task_id, trace_text):
    """Stubbed Reflector stage: mine the trace and propose playbook deltas.

    Parameters
    ----------
    task_id : str
        Task identifier cited as evidence; falls back to "T-000" when falsy.
    trace_text : str
        Generator trace whose "Sxx:" step markers become evidence references.

    Returns
    -------
    str
        A YAML document with a single "add" delta proposing a checklist item.
    """
    # FIX: the original re-imported `re` and `yaml as _yaml` inside the
    # function even though both are imported at module level — redundant
    # shadowing removed; behavior is unchanged.
    steps = re.findall(r"S(\d+):", trace_text)
    # Cite the last two trace steps as supporting evidence (empty if none).
    evidence_steps = [f"S{sid}" for sid in steps[-2:]] if steps else []
    deltas_yaml = {
        "deltas": [{
            "op": "add",
            "proposed": {
                "type": "checklist",
                "title": "Preflight task restatement",
                "content": "Before solving, restate the task in one line and list 2–3 success criteria plus one salient risk.",
                "tags": ["meta", "stillness"],
                "evidence": {
                    "task_id": task_id or "T-000",
                    "trace_steps": evidence_steps,
                    "tests": [],
                    "sources": []
                },
                "confidence": "HIGH",
                "paradigm_scope": ["method"],
                "closure_flags": {"anthropomorphization": False, "spec_gaming": False, "monoculture": False},
                "cem": {
                    "claim_id": "C-NEW",
                    "claim_text": "Preflight improves clarity and reduces failure risk.",
                    "evidence_refs": evidence_steps,
                    "counterevidence_refs": []
                }
            },
            "rationale": "Reusable checklist that generalizes across tasks."
        }]
    }
    return yaml.safe_dump(deltas_yaml, sort_keys=False, allow_unicode=True)
def run_curator(playbook_text, deltas_text):
    """Curator stage: merge Reflector "add" deltas into the playbook.

    Parameters
    ----------
    playbook_text : str or None
        Playbook YAML from the UI textbox; blank/invalid input falls back to
        the on-disk playbook via load_playbook().
    deltas_text : str or None
        Reflector output YAML with a top-level "deltas" list.

    Returns
    -------
    tuple[str, str]
        (updated playbook YAML text, human-readable merge log).

    Side effect: persists the merged playbook to disk via save_playbook().
    """
    # FIX: Gradio textboxes can deliver None; the original crashed on
    # None.strip() before it could report anything.
    playbook_text = playbook_text or ""
    deltas_text = deltas_text or ""
    # Prefer the UI text, but fall back to the stored playbook whenever it
    # is blank, unparsable, or lacks the "playbook" root key.
    try:
        pb = yaml.safe_load(playbook_text) if playbook_text.strip() else load_playbook()
        if not pb or "playbook" not in pb:
            pb = load_playbook()
    except Exception:
        pb = load_playbook()
    if not isinstance(pb.get("playbook"), dict):
        # FIX: a present-but-null "playbook" key made the item-list
        # assignment at the bottom raise TypeError.
        pb["playbook"] = {"items": []}
    try:
        dz = yaml.safe_load(deltas_text) if deltas_text.strip() else {"deltas": []}
    except Exception as e:
        return playbook_text, f"YAML parse error in deltas: {e}"
    if not isinstance(dz, dict):
        # FIX: a bare scalar/list parses fine but has no .get(); treat as empty.
        dz = {"deltas": []}
    items = pb.get("playbook", {}).get("items", []) or []
    # Dedupe on (title, type); ids continue from the highest existing PB-nnn.
    existing = {(it.get("title", ""), it.get("type", "")) for it in items}
    next_id = 1
    for it in items:
        try:
            n = int(str(it.get("id", "PB-0")).split("-")[-1])
            next_id = max(next_id, n + 1)
        except Exception:
            pass  # malformed id — skip it for numbering purposes
    diff_lines = []
    for d in dz.get("deltas", []) or []:
        if not isinstance(d, dict) or d.get("op") != "add":
            continue  # only "add" operations are supported by this stub
        prop = d.get("proposed", {})
        key = (prop.get("title", ""), prop.get("type", ""))
        if key in existing:
            diff_lines.append(f"~ duplicate skipped: {key[0]} ({key[1]})")
            continue
        new_item = {
            "id": f"PB-{next_id:03d}",
            "type": prop.get("type", "heuristic"),
            "title": prop.get("title", "Untitled"),
            "content": prop.get("content", ""),
            "tags": prop.get("tags", []),
            "helpful": 0,
            "harmful": 0,
        }
        items.append(new_item)
        existing.add(key)
        diff_lines.append(f"+ {new_item['id']}: {new_item['title']} ({new_item['type']})")
        next_id += 1
    pb["playbook"]["items"] = items
    save_playbook(pb)  # persist before echoing the merged text back to the UI
    new_text = yaml.safe_dump(pb, sort_keys=False, allow_unicode=True)
    return new_text, "\n".join(diff_lines) if diff_lines else "No changes applied."
def cem_load():
    """UI hook: fetch the CEM table (delegates to ensure_cem)."""
    return ensure_cem()
def cem_save(df):
    """UI hook: persist the edited CEM table (delegates to save_cem)."""
    return save_cem(df)
def _initial_playbook_text():
    """Initial contents for the playbook textbox, safe on first run."""
    # FIX: the original did open(PLAYBOOK_PATH).read() unconditionally —
    # it leaked the file handle and raised FileNotFoundError when the file
    # was absent, even though load_playbook() already defines the fallback.
    if os.path.exists(PLAYBOOK_PATH):
        with open(PLAYBOOK_PATH, "r", encoding="utf-8") as f:
            return f.read()
    return yaml.safe_dump(load_playbook(), sort_keys=False, allow_unicode=True)

# NOTE(review): indentation was lost in this paste; the Blocks/Row nesting
# below is the natural reconstruction — confirm against the original layout.
with gr.Blocks(title="ACE–CPT Context Agent") as demo:
    gr.Markdown("# ACE–CPT Context Agent\nA minimal ACE loop (Generator → Reflector → Curator) with CPT add‑ons.")
    with gr.Row():
        task = gr.Textbox(label="Task", placeholder="Describe the task…")
        task_id = gr.Textbox(label="TaskID", value="T-001")
    inputs = gr.Textbox(label="Inputs (optional)", lines=6, placeholder="Paste notes, success criteria, risks…")
    playbook = gr.Textbox(label="Playbook (YAML)", lines=18, value=_initial_playbook_text())
    with gr.Row():
        gen_btn = gr.Button("Run Generator")
        ref_btn = gr.Button("Run Reflector")
        cur_btn = gr.Button("Run Curator")
        save_pb_btn = gr.Button("Save Playbook")
    answer = gr.Textbox(label="Answer (Generator)", lines=3)
    trace = gr.Textbox(label="Trace (Generator)", lines=8)
    used = gr.Textbox(label="UsedItems (Generator)", lines=1)
    cpt = gr.Textbox(label="CPT Quick Notes (Generator)", lines=6)
    deltas = gr.Textbox(label="Reflector Deltas (YAML)", lines=10)
    diff = gr.Textbox(label="Curator Merge Log", lines=4)
    gr.Markdown("## Claim–Evidence Matrix (CEM)")
    cem_dataframe = gr.Dataframe(value=cem_load(), interactive=True, wrap=True)
    with gr.Row():
        cem_save_btn = gr.Button("Save CEM")
    # Wiring: each button drives one stage of the ACE loop; the Curator
    # writes its merged YAML back into the playbook textbox.
    gen_btn.click(run_generator, [task, inputs, playbook], [answer, trace, used, cpt])
    ref_btn.click(run_reflector, [task_id, trace], deltas)
    cur_btn.click(run_curator, [playbook, deltas], [playbook, diff])
    # NOTE(review): invalid YAML in the textbox still raises here and surfaces
    # as a Gradio error — intentional; it is not silently swallowed.
    save_pb_btn.click(lambda t: (save_playbook(yaml.safe_load(t)), t)[1], playbook, playbook)
    cem_save_btn.click(cem_save, cem_dataframe, cem_dataframe)

if __name__ == "__main__":
    demo.launch()