# AGENT_INTEGRATION_TEMPLATE.ex
#
# Copy this into any ECHO agent to add session-based LLM consultation.
# Replace AGENT_NAME and :agent_role with your agent's details.

defmodule AGENT_NAME do
  use EchoShared.MCP.Server

  @impl true
  def agent_info do
    %{
      name: "agent-name",
      version: "1.0.0",
      role: :agent_role, # ← Change this: :ceo, :cto, :chro, etc.
      llm_model: "model-name:version"
    }
  end

  @impl true
  def tools do
    [
      # ... your existing tools ...

      # ============================================================
      # NEW: Session-based AI consultation with conversation memory
      # ============================================================
      %{
        name: "session_consult",
        description: """
        Query the AI assistant with conversation memory (LocalCode-style).

        Maintains multi-turn conversations with automatic context injection:
        - Your role, responsibilities, and authority limits
        - Recent decisions and messages (last 5 each)
        - Current system status (PostgreSQL, Redis, Ollama)
        - Git context (branch, last commit)
        - Conversation history (last 5 turns)

        Perfect for:
        - Exploratory questions about your role
        - Decision analysis with iterative thinking
        - Strategy planning with follow-up questions
        - Learning from past decisions

        Example workflow:
        1. Ask: "What should I prioritize this quarter?"
        2. Follow-up: "Tell me more about priority #2"
        3. Deep dive: "What are the risks with that approach?"
        """,
        inputSchema: %{
          type: "object",
          properties: %{
            question: %{
              type: "string",
              description: "The question to ask the AI assistant",
              minLength: 1
            },
            session_id: %{
              type: "string",
              description: """
              Session ID to continue an existing conversation.
              Omit this field to start a new session.
              Session IDs look like: "ceo_1699564234_123456"
              """
            },
            context: %{
              type: "string",
              description: """
              Additional context for this specific query.
              Example: "Budget: $5M, Timeline: Q1 2025, Team size: 50"
              """
            }
          },
          required: ["question"]
        }
      }
    ]
  end

  @impl true
  def execute_tool(tool_name, args) do
    case tool_name do
      # ... your existing tool handlers ...

      # ============================================================
      # NEW: Session consultation handler
      # ============================================================
      "session_consult" ->
        execute_session_consult(args)

      _ ->
        {:error, "Unknown tool: #{tool_name}"}
    end
  end

  # ============================================================
  # NEW: Session-based consultation implementation
  # ============================================================

  # Runs one session-aware LLM query.
  #
  # `args` is the decoded tool-call map: "question" is required (crashes
  # with KeyError if absent — the schema already enforces it), "session_id"
  # and "context" are optional. Returns {:ok, map} on success or
  # {:error, String.t()} with a human-readable message.
  defp execute_session_consult(args) do
    alias EchoShared.LLM.DecisionHelper

    # Extract arguments
    question = Map.fetch!(args, "question")
    session_id = Map.get(args, "session_id") # nil starts a new session
    context = Map.get(args, "context")

    # Build options — only pass :context when the caller supplied one
    opts = if context, do: [context: context], else: []

    # Query LLM with session memory
    case DecisionHelper.consult_session(agent_role(), session_id, question, opts) do
      {:ok, result} ->
        {:ok, format_session_response(result)}

      {:error, :llm_disabled} ->
        # Compute the role and its env-var prefix once instead of three
        # inline agent_role() calls; resulting string is identical.
        role = agent_role()
        env_prefix = role |> Atom.to_string() |> String.upcase()

        {:error,
         "LLM is disabled for #{role}. Enable with LLM_ENABLED=true or #{env_prefix}_LLM_ENABLED=true"}

      {:error, :session_not_found} ->
        {:error, "Session not found: #{session_id}. It may have expired after 1 hour of inactivity."}

      {:error, reason} ->
        {:error, "AI consultation failed: #{inspect(reason)}"}
    end
  end

  # Shapes a successful consult result into the tool's response map.
  # Includes :warnings only when the helper reported any, so small
  # sessions return a clean payload.
  defp format_session_response(result) do
    # Get model info for the current agent role
    model = EchoShared.LLM.Config.get_model(agent_role())

    # Base response
    base = %{
      response: result.response,
      session_id: result.session_id,
      turn_count: result.turn_count,
      estimated_tokens: result.total_tokens,
      model: model,
      agent: agent_role()
    }

    # Add warnings if context is getting large
    if result.warnings != [] do
      Map.put(base, :warnings, result.warnings)
    else
      base
    end
  end

  # ============================================================
  # IMPORTANT: Set your agent role here!
  # ============================================================
  defp agent_role do
    # Change this to match your agent:
    :ceo                 # CEO agent
    # :cto               # CTO agent
    # :chro              # CHRO agent
    # :operations_head   # Operations Head agent
    # :product_manager   # Product Manager agent
    # :senior_architect  # Senior Architect agent
    # :uiux_engineer     # UI/UX Engineer agent
    # :senior_developer  # Senior Developer agent
    # :test_lead         # Test Lead agent
  end

  # ... rest of your agent code ...
end

# ============================================================
# USAGE EXAMPLES
# ============================================================

# Example 1: Start new session
# {
#   "tool": "session_consult",
#   "arguments": {
#     "question": "What are my top priorities as CEO?"
#   }
# }
#
# Response:
# {
#   "response": "As CEO, your top priorities should be:\n1. Strategic planning...",
#   "session_id": "ceo_1699564234_123456",
#   "turn_count": 1,
#   "estimated_tokens": 1876,
#   "model": "llama3.1:8b",
#   "agent": "ceo"
# }

# Example 2: Continue conversation
# {
#   "tool": "session_consult",
#   "arguments": {
#     "session_id": "ceo_1699564234_123456",
#     "question": "Tell me more about priority #2"
#   }
# }
#
# Response:
# {
#   "response": "Regarding strategic planning, you should focus on...",
#   "session_id": "ceo_1699564234_123456",
#   "turn_count": 2,
#   "estimated_tokens": 2341,
#   "model": "llama3.1:8b",
#   "agent": "ceo"
# }

# Example 3: With additional context
# {
#   "tool": "session_consult",
#   "arguments": {
#     "question": "Should we approve this budget request?",
#     "context": "Budget: $2.5M for datacenter. Cash reserves: $10M."
#   }
# }

# Example 4: Context warning (after 8-10 turns)
# {
#   "response": "Based on our discussion...",
#   "session_id": "ceo_1699564234_123456",
#   "turn_count": 9,
#   "estimated_tokens": 4523,
#   "model": "llama3.1:8b",
#   "agent": "ceo",
#   "warnings": [
#     "Session has 9 turns. Consider ending session soon.",
#     "Context size large (4523 tokens). Session approaching limit."
#   ]
# }

# ============================================================
# TESTING
# ============================================================

# Test in IEx:
#   iex -S mix
#   iex> alias EchoShared.LLM.DecisionHelper
#   iex> {:ok, r1} = DecisionHelper.consult_session(:ceo, nil, "What's my role?")
#   iex> IO.puts(r1.response)
#   iex> {:ok, r2} = DecisionHelper.consult_session(:ceo, r1.session_id, "What are my priorities?")
#   iex> IO.puts(r2.response)

# ============================================================
# CONFIGURATION
# ============================================================

# Models configured in apps/echo_shared/config/dev.exs:
#   config :echo_shared, :agent_models, %{
#     ceo: "llama3.1:8b",
#     cto: "deepseek-coder:6.7b",
#     chro: "llama3.1:8b",
#     operations_head: "mistral:7b",
#     product_manager: "llama3.1:8b",
#     senior_architect: "deepseek-coder:6.7b",
#     uiux_engineer: "llama3.1:8b",
#     senior_developer: "deepseek-coder:6.7b",
#     test_lead: "deepseek-coder:6.7b"
#   }

# Override via environment:
#   export CEO_MODEL=qwen2.5:14b
#   export CEO_LLM_ENABLED=false