# paradigms/app.py — ACE–CPT Context Agent demo (Gradio).
import gradio as gr
import yaml, pandas as pd, os, re
PLAYBOOK_PATH = "playbook.yaml"
CEM_PATH = "cem.csv"
def load_playbook():
    """Load the playbook YAML from disk, or return a minimal default structure.

    Returns a dict; an empty/unparseable file yields ``{}`` so callers never
    receive ``None``.
    """
    if not os.path.exists(PLAYBOOK_PATH):
        # No playbook on disk yet — hand back an empty skeleton.
        return {
            "playbook": {
                "metadata": {"name": "ACE–CPT Playbook", "version": "0.1"},
                "items": [],
            }
        }
    with open(PLAYBOOK_PATH, "r", encoding="utf-8") as fh:
        loaded = yaml.safe_load(fh)
    return loaded or {}
def save_playbook(pb):
    """Persist the playbook dict *pb* to PLAYBOOK_PATH as YAML.

    Always returns True (kept for the existing caller that relies on a
    truthy result).
    """
    with open(PLAYBOOK_PATH, "w", encoding="utf-8") as fh:
        yaml.safe_dump(pb, fh, sort_keys=False, allow_unicode=True)
    return True
def ensure_cem():
    """Return the Claim–Evidence Matrix as a DataFrame, creating an empty CSV on first use."""
    if not os.path.exists(CEM_PATH):
        # Seed the file with the expected header row and no data.
        columns = ["ClaimID", "Claim", "EvidenceRefs", "Counter-Evidence", "Confidence"]
        pd.DataFrame(columns=columns).to_csv(CEM_PATH, index=False)
    return pd.read_csv(CEM_PATH)
def save_cem(df):
    """Write the CEM dataframe *df* back to CEM_PATH and return it unchanged."""
    df.to_csv(CEM_PATH, index=False)
    return df
def run_generator(task, inputs, playbook_text):
    """Stub Generator: produce a canned answer plus a synthetic reasoning trace.

    ``inputs`` and ``playbook_text`` are accepted for interface parity with a
    real model call but are not consulted by this stub. Returns a 4-tuple of
    (answer, trace, used_items, cpt_notes) strings.
    """
    trace_lines = (
        f"S01: Restate task → {task[:120]}",  # task text is truncated for display
        "S02: Identify relevant playbook items → PB-001, PB-004",
        "S03: Apply heuristic reasoning (stub)",
        "S04: Prepare answer with uncertainty note",
    )
    return (
        "[Demo] Task addressed. (Replace with real model call.)",
        "\n".join(trace_lines),
        "PB-001, PB-004",
        "2.11–2.17 quick pass; Flags: anthropomorphization=false, spec_gaming=possible, uncertainty=high",
    )
def run_reflector(task_id, trace_text):
    """Stub Reflector: propose one playbook delta, citing the last trace steps.

    Extracts step ids (``S01:`` style markers) from *trace_text*, cites the
    final two as evidence, and returns a YAML string with a single "add" delta.
    """
    # Uses the module-level `re` and `yaml` imports.
    step_ids = re.findall(r"S(\d+):", trace_text)
    cited = [f"S{sid}" for sid in step_ids[-2:]] if step_ids else []
    proposed = {
        "type": "checklist",
        "title": "Preflight task restatement",
        "content": "Before solving, restate the task in one line and list 2–3 success criteria plus one salient risk.",
        "tags": ["meta", "stillness"],
        "evidence": {
            "task_id": task_id or "T-000",  # fall back to a placeholder id
            "trace_steps": cited,
            "tests": [],
            "sources": [],
        },
        "confidence": "HIGH",
        "paradigm_scope": ["method"],
        "closure_flags": {
            "anthropomorphization": False,
            "spec_gaming": False,
            "monoculture": False,
        },
        "cem": {
            "claim_id": "C-NEW",
            "claim_text": "Preflight improves clarity and reduces failure risk.",
            "evidence_refs": cited,
            "counterevidence_refs": [],
        },
    }
    payload = {
        "deltas": [
            {
                "op": "add",
                "proposed": proposed,
                "rationale": "Reusable checklist that generalizes across tasks.",
            }
        ]
    }
    return yaml.safe_dump(payload, sort_keys=False, allow_unicode=True)
def run_curator(playbook_text, deltas_text):
    """Merge Reflector "add" deltas into the playbook.

    Parses the playbook from *playbook_text* (falling back to the on-disk
    playbook when the text is empty, invalid, or not shaped like a playbook),
    parses *deltas_text*, appends each new proposed item with a fresh PB-###
    id, skips duplicates (same title + type), persists the result to disk,
    and returns ``(new_playbook_yaml, merge_log)``.
    """
    # Playbook: prefer the pasted text; fall back to disk on any problem.
    try:
        pb = yaml.safe_load(playbook_text) if playbook_text.strip() else load_playbook()
        if not isinstance(pb, dict) or "playbook" not in pb:
            pb = load_playbook()
    except Exception:
        pb = load_playbook()
    # Normalize: a pasted "playbook:" with a null/scalar value would otherwise
    # crash the .get("items") lookup and the assignment at the end.
    if not isinstance(pb.get("playbook"), dict):
        pb["playbook"] = {}
    # Deltas: a parse error here is reported back to the user, not swallowed.
    try:
        dz = yaml.safe_load(deltas_text) if deltas_text.strip() else {"deltas": []}
    except Exception as e:
        return playbook_text, f"YAML parse error in deltas: {e}"
    if not isinstance(dz, dict):
        dz = {"deltas": []}  # e.g. deltas text was a bare list or scalar
    # "items: " (null) in YAML must behave like an empty list.
    items = pb["playbook"].get("items") or []
    # Duplicate key is (title, type); new ids continue from the highest PB-###.
    existing = {(it.get("title", ""), it.get("type", "")) for it in items}
    next_id = 1
    for it in items:
        try:
            n = int(str(it.get("id", "PB-0")).split("-")[-1])
            next_id = max(next_id, n + 1)
        except Exception:
            pass  # malformed id: ignore for numbering purposes
    diff_lines = []
    for d in dz.get("deltas") or []:
        if d.get("op") != "add":
            continue  # only "add" operations are supported by this curator
        prop = d.get("proposed", {})
        key = (prop.get("title", ""), prop.get("type", ""))
        if key in existing:
            diff_lines.append(f"~ duplicate skipped: {key[0]} ({key[1]})")
            continue
        new_item = {
            "id": f"PB-{next_id:03d}",
            "type": prop.get("type", "heuristic"),
            "title": prop.get("title", "Untitled"),
            "content": prop.get("content", ""),
            "tags": prop.get("tags", []),
            "helpful": 0,
            "harmful": 0,
        }
        items.append(new_item)
        existing.add(key)
        diff_lines.append(f"+ {new_item['id']}: {new_item['title']} ({new_item['type']})")
        next_id += 1
    pb["playbook"]["items"] = items
    save_playbook(pb)  # side effect: persists the merged playbook to disk
    new_text = yaml.safe_dump(pb, sort_keys=False, allow_unicode=True)
    return new_text, "\n".join(diff_lines) if diff_lines else "No changes applied."
def cem_load():
    """UI-facing wrapper: load (and lazily create) the CEM table."""
    return ensure_cem()
def cem_save(df):
    """UI-facing wrapper: persist the edited CEM dataframe."""
    return save_cem(df)
def _initial_playbook_text():
    """Read the playbook file for the editor, tolerating a missing file.

    The original code did ``open(PLAYBOOK_PATH).read()`` inline, which leaked
    the file handle and crashed at import time when the file did not exist.
    """
    if os.path.exists(PLAYBOOK_PATH):
        with open(PLAYBOOK_PATH, "r", encoding="utf-8") as fh:
            return fh.read()
    # No file yet: show the default skeleton that load_playbook() provides.
    return yaml.safe_dump(load_playbook(), sort_keys=False, allow_unicode=True)


with gr.Blocks(title="ACE–CPT Context Agent") as demo:
    gr.Markdown("# ACE–CPT Context Agent\nA minimal ACE loop (Generator → Reflector → Curator) with CPT add‑ons.")
    with gr.Row():
        task = gr.Textbox(label="Task", placeholder="Describe the task…")
        task_id = gr.Textbox(label="TaskID", value="T-001")
    inputs = gr.Textbox(label="Inputs (optional)", lines=6, placeholder="Paste notes, success criteria, risks…")
    playbook = gr.Textbox(label="Playbook (YAML)", lines=18, value=_initial_playbook_text())
    with gr.Row():
        gen_btn = gr.Button("Run Generator")
        ref_btn = gr.Button("Run Reflector")
        cur_btn = gr.Button("Run Curator")
        save_pb_btn = gr.Button("Save Playbook")
    answer = gr.Textbox(label="Answer (Generator)", lines=3)
    trace = gr.Textbox(label="Trace (Generator)", lines=8)
    used = gr.Textbox(label="UsedItems (Generator)", lines=1)
    cpt = gr.Textbox(label="CPT Quick Notes (Generator)", lines=6)
    deltas = gr.Textbox(label="Reflector Deltas (YAML)", lines=10)
    diff = gr.Textbox(label="Curator Merge Log", lines=4)
    gr.Markdown("## Claim–Evidence Matrix (CEM)")
    cem_dataframe = gr.Dataframe(value=cem_load(), interactive=True, wrap=True)
    with gr.Row():
        cem_save_btn = gr.Button("Save CEM")
    # Wire the ACE loop: Generator → Reflector → Curator, plus manual saves.
    gen_btn.click(run_generator, [task, inputs, playbook], [answer, trace, used, cpt])
    ref_btn.click(run_reflector, [task_id, trace], deltas)
    cur_btn.click(run_curator, [playbook, deltas], [playbook, diff])
    # Save Playbook parses the editor text, writes it to disk, echoes it back.
    save_pb_btn.click(lambda t: (save_playbook(yaml.safe_load(t)), t)[1], playbook, playbook)
    cem_save_btn.click(cem_save, cem_dataframe, cem_dataframe)

if __name__ == "__main__":
    demo.launch()