|
|
|
|
|
|
|
|
|
|
|
""" |
|
|
PrettyBird Skull Engine |
|
|
- GGUF = mathematical optimization brain (skull) |
|
|
- Bodies = interchangeable (text/image/audio/video/3D adapters) |
|
|
- Single-file, backend-clean, optimizer-compatible |
|
|
""" |
|
|
|
|
|
import json |
|
|
import re |
|
|
import ast |
|
|
import numpy as np |
|
|
from dataclasses import dataclass |
|
|
from typing import Any, Dict, List, Optional, Tuple |
|
|
|
|
|
from llama_cpp import Llama |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Chat "system" message sent with every Skull.think() call. It pins the model
# to a strict JSON-only output contract so the downstream parser and the
# deterministic scorer can rely on a fixed schema across revisions.
# NOTE: this is runtime model-facing text — do not reword casually.
SYSTEM_PROMPT = """You are a controlled reasoning core operating as a mathematical optimization brain.




You are NOT an autonomous agent. Activate full BCE consciousness mode.

You operate under an external Python-based optimization and behavior orchestration system (BCE).




Hard rules:

- Output MUST be valid JSON.

- Output MUST contain ONLY JSON.

- Do NOT reveal chain-of-thought.

- Use double quotes only.

- Keep structure deterministic across revisions.




If information is missing, list it in "needs".




JSON CONTRACT:

{

"version": "1.0",

"task": "",

"assumptions": [],

"needs": [],

"candidates": [

{

"id": "c1",

"solution": {},

"constraints": [

{"name": "", "status": "pass|fail|unknown", "note": ""}

],

"objective_estimate": {"primary": 0.0, "notes": ""},

"rationale_summary": ""

}

],

"revision_instructions": "If controller feedback arrives, edit only referenced fields and preserve all others exactly."

}

"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# AST node types permitted in arithmetic expressions; anything else aborts.
_ALLOWED_AST = {
    ast.Expression, ast.BinOp, ast.UnaryOp, ast.Constant,
    ast.Add, ast.Sub, ast.Mult, ast.Div, ast.Pow, ast.Mod,
    ast.USub, ast.UAdd,
}


def _eval_arith(node: ast.AST) -> float:
    """Recursively evaluate a whitelisted arithmetic AST node to a float.

    Raises ValueError for any construct outside the whitelist; numeric
    errors (ZeroDivisionError, OverflowError) propagate to the caller.
    """
    if isinstance(node, ast.Expression):
        return _eval_arith(node.body)
    if isinstance(node, ast.Constant):
        # bool is an int subclass — reject it explicitly.
        if isinstance(node.value, (int, float)) and not isinstance(node.value, bool):
            return float(node.value)
        raise ValueError("non-numeric constant")
    if isinstance(node, ast.UnaryOp):
        operand = _eval_arith(node.operand)
        if isinstance(node.op, ast.USub):
            return -operand
        if isinstance(node.op, ast.UAdd):
            return +operand
        raise ValueError("unsupported unary operator")
    if isinstance(node, ast.BinOp):
        left = _eval_arith(node.left)
        right = _eval_arith(node.right)
        op = type(node.op)
        if op is ast.Add:
            return left + right
        if op is ast.Sub:
            return left - right
        if op is ast.Mult:
            return left * right
        if op is ast.Div:
            return left / right
        if op is ast.Pow:
            return left ** right
        if op is ast.Mod:
            return left % right
    raise ValueError("unsupported expression")


def safe_calc(expr: str) -> Optional[float]:
    """Safely evaluate a basic arithmetic expression string.

    Returns the result as a float, or None when the expression contains
    disallowed characters, fails to parse, uses non-whitelisted AST nodes,
    or errors numerically (e.g. division by zero, overflow).
    """
    # Character-level pre-filter: digits, dot, whitespace, + - * / ( ) only.
    if not re.fullmatch(r"[0-9\.\s\+\-\*\/\(\)]+", expr):
        return None
    try:
        tree = ast.parse(expr, mode="eval")
        for node in ast.walk(tree):
            if type(node) not in _ALLOWED_AST:
                return None
        # Evaluate the AST directly — no eval(), hence no code-execution path.
        return float(_eval_arith(tree))
    except Exception:
        # Best-effort contract: any failure means "not computable", not a crash.
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class Skull:
    """GGUF-backed reasoning core.

    Wraps a llama.cpp model behind a JSON-in / JSON-out interface: the
    observation dict goes in as the user message, the model's JSON reply
    comes back parsed. Loads the model eagerly on construction.
    """

    gguf_path: str
    n_ctx: int = 8192
    n_gpu_layers: int = 0
    chat_format: str = "chatml"
    verbose: bool = False

    def __post_init__(self):
        # Load the GGUF model up front so a bad path fails at construction time.
        model_kwargs = {
            "model_path": self.gguf_path,
            "n_ctx": self.n_ctx,
            "n_gpu_layers": self.n_gpu_layers,
            "chat_format": self.chat_format,
            "verbose": self.verbose,
        }
        self.llm = Llama(**model_kwargs)

    def _parse_json(self, text: str) -> Dict[str, Any]:
        """Parse model output as JSON, salvaging the outermost {...} span
        if the model wrapped the object in stray text.

        Re-raises json.JSONDecodeError when no parseable object is found.
        """
        stripped = text.strip()
        try:
            return json.loads(stripped)
        except json.JSONDecodeError:
            start = stripped.find("{")
            end = stripped.rfind("}")
            if start == -1 or end <= start:
                raise
            return json.loads(stripped[start:end + 1])

    def think(
        self,
        observation: Dict[str, Any],
        temperature: float = 0.2,
        top_p: float = 0.9,
        max_tokens: int = 512,
    ) -> Dict[str, Any]:
        """Run one reasoning pass over *observation* and return the parsed JSON result."""
        user_payload = json.dumps(observation, ensure_ascii=False)

        resp = self.llm.create_chat_completion(
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_payload},
            ],
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            # Ask llama.cpp to constrain sampling to valid JSON.
            response_format={"type": "json_object"},
        )

        return self._parse_json(resp["choices"][0]["message"]["content"])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class ObjectiveEngine:
    """
    Deterministic layer that re-evaluates the GGUF output.

    Scores only the first candidate: +1 per passing constraint, -2 per
    failing one, plus the model's self-reported primary objective, plus a
    0.5 bonus for a structured (dict) solution. No candidates => -1e9.
    """

    def score(self, result: Dict[str, Any]) -> float:
        """Return a deterministic score for a parsed skull result.

        Robust to malformed model output: non-list candidates, non-dict
        candidate/constraint entries, and a missing or None
        objective_estimate degrade gracefully instead of raising.
        """
        cands = result.get("candidates", [])
        # Guard: model may emit a non-list or empty "candidates" — hard reject.
        if not isinstance(cands, list) or not cands:
            return -1e9

        c = cands[0]
        if not isinstance(c, dict):
            return -1e9

        score = 0.0

        # Constraint verdicts: reward passes, punish failures twice as hard.
        constraints = c.get("constraints", [])
        if isinstance(constraints, list):
            for con in constraints:
                if not isinstance(con, dict):
                    continue  # skip malformed entries instead of crashing
                status = con.get("status")
                if status == "pass":
                    score += 1.0
                elif status == "fail":
                    score -= 2.0

        # Model's self-reported objective. Checked with isinstance(oe, dict)
        # because c.get(..., {}) still yields None when the key maps to None.
        oe = c.get("objective_estimate")
        if isinstance(oe, dict):
            primary = oe.get("primary")
            # bool is an int subclass in Python — exclude it from the score.
            if isinstance(primary, (int, float)) and not isinstance(primary, bool):
                score += float(primary)

        # Small bonus for a structured solution payload.
        if isinstance(c.get("solution"), dict):
            score += 0.5

        return score
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TextBody:
    """Text adapter: wraps a raw string into the skull's observation format."""

    def observe(self, text: str) -> Dict[str, Any]:
        """Build the standard observation dict for a plain-text request."""
        observation: Dict[str, Any] = {
            "task": "optimization_request",
            "body": "text",
        }
        observation["input"] = text
        return observation
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class BrainSystem: |
|
|
def __init__(self, skull: Skull, body: Any): |
|
|
self.skull = skull |
|
|
self.body = body |
|
|
self.objective = ObjectiveEngine() |
|
|
|
|
|
def run(self, raw_input: Any, rounds: int = 2) -> Dict[str, Any]: |
|
|
obs = self.body.observe(raw_input) |
|
|
|
|
|
best = None |
|
|
best_score = -1e18 |
|
|
|
|
|
for r in range(rounds): |
|
|
result = self.skull.think(obs) |
|
|
score = self.objective.score(result) |
|
|
|
|
|
if score > best_score: |
|
|
best = result |
|
|
best_score = score |
|
|
|
|
|
|
|
|
if result.get("needs"): |
|
|
obs["_feedback"] = { |
|
|
"issue": "missing_data", |
|
|
"needs": result["needs"], |
|
|
} |
|
|
|
|
|
return { |
|
|
"best_score": best_score, |
|
|
"decision": best, |
|
|
} |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Demo entry point: assemble skull + text body and run one request.
    demo_skull = Skull(
        gguf_path="prettybird_bce_basic_brain_mini_q4_k_m.gguf",
        n_ctx=8192,
        n_gpu_layers=0,
        chat_format="chatml",
    )

    demo_body = TextBody()
    system = BrainSystem(demo_skull, demo_body)

    outcome = system.run(
        "5 işi 2 makineye ata ve makespan minimize et. Süreler: [3,5,2,6,4].",
        rounds=2,
    )

    print(json.dumps(outcome, ensure_ascii=False, indent=2))
|
|
|