- Graph definitions (v3, v4) now declare MODELS mapping role → model string
- engine.py extracts MODELS and applies to nodes during instantiation
- frame_engine.process_message() accepts model_overrides for per-request swaps (restored via try/finally after processing)
- 11/11 engine tests green

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
81 lines
3.0 KiB
Python
"""v4-eras: PA + Eras Expert with progress streaming.

Personal Assistant routes to the Eras expert for heating/energy DB work.
Social/general messages handled directly by PA.

Frame traces:
    Reflex:          F1(Input) → F2(Output)
    PA direct:       F1(Input) → F2(PA) → F3(Output+UI)
    Expert:          F1(Input) → F2(PA) → F3(ErasExpert) → F4(Output+UI)
    Expert+Interp:   F1(Input) → F2(PA) → F3(ErasExpert) → F4(Interpreter) → F5(Output+UI)
"""
# Graph identity — read by the engine when loading this definition.
NAME = "v4-eras"
DESCRIPTION = "PA + Eras Expert: heating/energy database with progress streaming"
# Execution backend selector (this graph runs on the frames engine).
ENGINE = "frames"
# Node role → implementation identifier. The engine instantiates one node
# per entry; role names are what EDGES/MODELS refer to.
NODES = {
    "input": "input_v1",
    "pa": "pa_v1",
    "expert_eras": "eras_expert",
    "interpreter": "interpreter_v1",
    "output": "output_v1",
    "ui": "ui",
    "memorizer": "memorizer_v1",
    "sensor": "sensor",
}
# Edge list wiring node roles together. Three edge types appear here:
#   data    — payload handoff between frames; "carries" names the payload,
#             optional "condition" keys into CONDITIONS, "parallel" fans out
#             to multiple targets at once.
#   context — target receives the result of the source node's named "method"
#             (presumably injected as prompt context — confirm against engine.py).
#   state   — node reads a field from the runtime rather than receiving data.
EDGES = [
    # Data edges
    {"from": "input", "to": "pa", "type": "data", "carries": "Command"},
    {"from": "input", "to": "output", "type": "data", "carries": "Command",
     "condition": "reflex"},
    {"from": "pa", "to": "expert_eras", "type": "data", "carries": "PARouting",
     "condition": "expert_is_eras"},
    {"from": "pa", "to": "output", "type": "data", "carries": "PARouting",
     "condition": "expert_is_none"},
    {"from": "expert_eras", "to": ["output", "ui"], "type": "data",
     "carries": "ThoughtResult", "parallel": True},
    {"from": "expert_eras", "to": "interpreter", "type": "data",
     "carries": "tool_output", "condition": "has_tool_output"},
    {"from": "interpreter", "to": "output", "type": "data",
     "carries": "InterpretedResult", "condition": "has_tool_output"},
    {"from": "output", "to": "memorizer", "type": "data", "carries": "history"},

    # Context edges — PA gets all context (experts are stateless)
    {"from": "memorizer", "to": "pa", "type": "context",
     "method": "get_context_block"},
    {"from": "memorizer", "to": "input", "type": "context",
     "method": "get_context_block"},
    {"from": "memorizer", "to": "output", "type": "context",
     "method": "get_context_block"},
    {"from": "pa", "to": "output", "type": "context",
     "method": "get_context_line"},
    {"from": "sensor", "to": "pa", "type": "context",
     "method": "get_context_lines"},
    {"from": "ui", "to": "pa", "type": "context",
     "method": "get_machine_summary"},

    # State edges
    {"from": "sensor", "to": "runtime", "type": "state", "reads": "flags"},
    {"from": "ui", "to": "runtime", "type": "state", "reads": "current_controls"},
]
# Named routing predicates referenced by EDGES' "condition" keys.
# Values are expression strings evaluated by the engine (syntax defined
# there — confirm against engine.py, not standard Python eval).
CONDITIONS = {
    "reflex": "intent==social AND complexity==trivial",
    "expert_is_eras": "pa.expert == eras",
    "expert_is_none": "pa.expert == none",
    "has_tool_output": "expert.tool_used is not empty",
}
# Node role → model identifier, applied by the engine at instantiation.
# "ui" and "sensor" are absent — presumably non-LLM nodes; verify against
# engine.py's MODELS handling.
MODELS = {
    "input": "google/gemini-2.0-flash-001",
    "pa": "anthropic/claude-haiku-4.5",
    "expert_eras": "google/gemini-2.0-flash-001",
    "interpreter": "google/gemini-2.0-flash-001",
    "output": "google/gemini-2.0-flash-001",
    "memorizer": "google/gemini-2.0-flash-001",
}
# Audit configuration — intentionally empty for this graph (no audit hooks).
AUDIT = {}