- /api/chat accepts {"models": {"role": "provider/model"}} for per-request overrides
- runtime.handle_message passes model_overrides through to frame engine
- All 4 graph definitions (v1-v4) now declare MODELS dicts
- test_graph_has_models expanded to verify all graphs
- 11/11 engine tests green
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
68 lines
2.5 KiB
Python
"""v1-current: Original pipeline — Input -> Thinker -> Output+UI -> Memo -> Director.
|
|
|
|
Thinker does everything (reasoning, tools, DB, UI, audit).
|
|
Director is passive (style adjustments) with optional Opus pre-planning for complex requests.
|
|
S3* audit compensates for Thinker weakness (code-without-tools, intent-without-action).
|
|
"""
|
|
|
|
NAME = "v1-current"
|
|
DESCRIPTION = "Original pipeline: Thinker does everything, S3* audits failures"
|
|
|
|
# Logical node role -> implementation identifier.
# Most roles point at a "_v1" implementation; "ui" and "sensor" are unversioned.
NODES = {
    "input": "input_v1",
    "thinker": "thinker_v1",
    "output": "output_v1",
    "ui": "ui",
    "memorizer": "memorizer_v1",
    "director": "director_v1",
    "sensor": "sensor",
}
# Graph edges. Three edge types appear below:
#   "data"    — typed objects flowing through the pipeline ("carries" names the payload);
#   "context" — text injected into LLM prompts ("method" names the provider accessor);
#   "state"   — shared persistent state read by the runtime ("reads" names the field).
# "condition" references a key of CONDITIONS; "parallel": True fans out to multiple
# targets at once ("to" is then a list).
EDGES = [
    # Data edges — typed objects flowing through pipeline
    {"from": "input", "to": "thinker", "type": "data", "carries": "Command"},
    {"from": "input", "to": "output", "type": "data", "carries": "Command",
     "condition": "reflex"},
    {"from": "thinker", "to": ["output", "ui"], "type": "data",
     "carries": "ThoughtResult", "parallel": True},
    {"from": "output", "to": "memorizer", "type": "data", "carries": "history"},
    {"from": "memorizer", "to": "director", "type": "data", "carries": "memo_state"},

    # Context edges — text injected into LLM prompts
    {"from": "memorizer", "to": "thinker", "type": "context",
     "method": "get_context_block"},
    {"from": "memorizer", "to": "input", "type": "context",
     "method": "get_context_block"},
    {"from": "memorizer", "to": "output", "type": "context",
     "method": "get_context_block"},
    {"from": "director", "to": "thinker", "type": "context",
     "method": "get_context_line"},
    {"from": "sensor", "to": "thinker", "type": "context",
     "method": "get_context_lines"},
    {"from": "ui", "to": "thinker", "type": "context",
     "method": "get_machine_summary"},

    # State edges — shared persistent state
    {"from": "sensor", "to": "runtime", "type": "state", "reads": "flags"},
    {"from": "ui", "to": "runtime", "type": "state", "reads": "current_controls"},
]
# Named routing predicates, referenced by EDGES via "condition".
# Values are expression strings; evaluation happens in the frame engine
# (semantics of the mini-language are defined there, not here).
CONDITIONS = {
    "reflex": "intent==social AND complexity==trivial",
    "plan_first": "complexity==complex OR is_data_request",
}
# Default LLM per node role, in "provider/model" form; overridable per request
# via the API's model_overrides mechanism. Non-LLM nodes ("ui", "sensor")
# deliberately have no entry.
MODELS = {
    "input": "google/gemini-2.0-flash-001",
    "thinker": "openai/gpt-4o-mini",
    "output": "google/gemini-2.0-flash-001",
    "memorizer": "google/gemini-2.0-flash-001",
    "director": "google/gemini-2.0-flash-001",
}
# S3* audit toggles — which Thinker failure modes the auditor checks for
# (see module docstring: audit compensates for Thinker weaknesses).
AUDIT = {
    "code_without_tools": True,
    "intent_without_action": True,
    "workspace_mismatch": True,
}