This repository has been archived on 2026-04-03. You can view files and clone it, but cannot push or open issues or pull requests.
agent-runtime/agent/graphs/v3_framed.py
Nico cf42951b77 Implement config-driven models (Phase 1): graph MODELS dict, instantiate applies, per-request overrides
- Graph definitions (v3, v4) now declare MODELS mapping role → model string
- engine.py extracts MODELS and applies to nodes during instantiation
- frame_engine.process_message() accepts model_overrides for per-request swaps
  (restored via try/finally after processing)
- 11/11 engine tests green

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-03 18:05:21 +02:00

75 lines
2.8 KiB
Python

"""v3-framed: Frame-based deterministic pipeline.
Same node topology as v2-director-drives but executed by FrameEngine
with tick-based deterministic ordering.
Frame trace:
Reflex: F1(Input) → F2(Output)
Simple: F1(Input) → F2(Director) → F3(Thinker) → F4(Output+UI)
With tools: F1(Input) → F2(Director) → F3(Thinker) → F4(Interpreter) → F5(Output+UI)
"""
NAME = "v3-framed"
DESCRIPTION = "Frame-based deterministic pipeline (Director+Thinker+Interpreter)"
ENGINE = "frames" # Signals Runtime to use FrameEngine instead of handle_message()
# Node registry: logical role name -> node implementation id.
# NOTE(review): "ui" and "sensor" have no version suffix — presumably
# infrastructure nodes rather than versioned LLM nodes; confirm in engine.py.
NODES = {
    "input": "input_v1",
    "director": "director_v2",
    "thinker": "thinker_v2",
    "interpreter": "interpreter_v1",
    "output": "output_v1",
    "ui": "ui",
    "memorizer": "memorizer_v1",
    "sensor": "sensor",
}
# Edge list consumed by the engine for frame routing. Three edge kinds:
#   data    — payload handoff between nodes; optional "condition" gate
#             (see CONDITIONS) and optional "parallel" fan-out to a list
#             of targets
#   context — the source node contributes context via the named method
#   state   — runtime-level reads of the named node field
EDGES = [
    # --- Data edges (same as v2; engine reads these for frame routing) ---
    {"from": "input", "to": "director", "type": "data", "carries": "Command"},
    {"from": "input", "to": "output", "type": "data", "carries": "Command", "condition": "reflex"},
    {"from": "director", "to": "thinker", "type": "data", "carries": "DirectorPlan"},
    {"from": "thinker", "to": ["output", "ui"], "type": "data", "carries": "ThoughtResult", "parallel": True},
    {"from": "thinker", "to": "interpreter", "type": "data", "carries": "tool_output", "condition": "has_tool_output"},
    {"from": "interpreter", "to": "output", "type": "data", "carries": "InterpretedResult", "condition": "has_tool_output"},
    {"from": "output", "to": "memorizer", "type": "data", "carries": "history"},
    # --- Context edges ---
    {"from": "memorizer", "to": "director", "type": "context", "method": "get_context_block"},
    {"from": "memorizer", "to": "input", "type": "context", "method": "get_context_block"},
    {"from": "memorizer", "to": "output", "type": "context", "method": "get_context_block"},
    {"from": "director", "to": "output", "type": "context", "method": "get_context_line"},
    {"from": "sensor", "to": "director", "type": "context", "method": "get_context_lines"},
    {"from": "ui", "to": "director", "type": "context", "method": "get_machine_summary"},
    # --- State edges ---
    {"from": "sensor", "to": "runtime", "type": "state", "reads": "flags"},
    {"from": "ui", "to": "runtime", "type": "state", "reads": "current_controls"},
]
# Named predicates referenced by the "condition" field on data edges.
# The values are expression strings — presumably evaluated by FrameEngine;
# confirm the exact DSL in engine.py.
CONDITIONS = {
    # Trivial social turns short-circuit straight from input to output.
    "reflex": "intent==social AND complexity==trivial",
    # Gate the thinker -> interpreter hop on an actual tool invocation.
    "has_tool_output": "thinker.tool_used is not empty",
}
# Per-role model assignment (role name -> model identifier string).
# Applied to nodes at instantiation; per-request overrides can swap them
# via frame_engine.process_message(model_overrides=...).
# NOTE(review): "ui" and "sensor" intentionally have no entry here —
# presumably they take no model; confirm against engine.py.
MODELS = {
    "input": "google/gemini-2.0-flash-001",
    "director": "anthropic/claude-haiku-4.5",
    "thinker": "google/gemini-2.0-flash-001",
    "interpreter": "google/gemini-2.0-flash-001",
    "output": "google/gemini-2.0-flash-001",
    "memorizer": "google/gemini-2.0-flash-001",
}

# No audit hooks configured for this graph; kept for interface parity.
AUDIT = {}