# scripts/llm/context_builder.sh
  1  #!/usr/bin/env bash
  2  # Context Builder for Local LLM Queries
  3  # Builds optimized context for querying local Ollama models
  4  # Architecture: Tiered context injection (Static Core + Dynamic Task + Conversation + Question)
  5  
  6  set -euo pipefail
  7  
  8  # Configuration
  9  OLLAMA_ENDPOINT="${OLLAMA_ENDPOINT:-http://localhost:11434}"
 10  DEFAULT_MODEL="${LLM_MODEL:-deepseek-coder:6.7b}"
 11  TIMEOUT="${LLM_TIMEOUT:-30000}"
 12  PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
 13  
 14  # Template types
 15  TEMPLATE_CODE_REVIEW="code_review"
 16  TEMPLATE_FEATURE_DESIGN="feature_design"
 17  TEMPLATE_DEBUGGING="debugging"
 18  TEMPLATE_ARCHITECTURE="architecture"
 19  TEMPLATE_GENERAL="general"
 20  
 21  # Color output
 22  RED='\033[0;31m'
 23  GREEN='\033[0;32m'
 24  YELLOW='\033[1;33m'
 25  BLUE='\033[0;34m'
 26  NC='\033[0m' # No Color
 27  
 28  # Error handling
 29  error() {
 30      echo -e "${RED}Error: $1${NC}" >&2
 31      exit 1
 32  }
 33  
 34  info() {
 35      echo -e "${BLUE}$1${NC}" >&2
 36  }
 37  
 38  success() {
 39      echo -e "${GREEN}$1${NC}" >&2
 40  }
 41  
 42  # Get current git context
 43  get_git_context() {
 44      cd "$PROJECT_ROOT" || error "Cannot access project root"
 45  
 46      local branch=$(git branch --show-current 2>/dev/null || echo "unknown")
 47      local last_commit=$(git log -1 --oneline 2>/dev/null || echo "No commits")
 48  
 49      echo "Branch: $branch | Last: $last_commit"
 50  }
 51  
 52  # Get ECHO phase from CLAUDE.md
 53  get_project_phase() {
 54      if [[ -f "$PROJECT_ROOT/CLAUDE.md" ]]; then
 55          grep -A 1 "Current Phase" "$PROJECT_ROOT/CLAUDE.md" | tail -1 | sed 's/\*\*//g' | xargs || echo "Unknown"
 56      else
 57          echo "Unknown"
 58      fi
 59  }
 60  
 61  # Build TIER 1: Static Core (~500 tokens)
 62  build_tier1_static_core() {
 63      local phase=$(get_project_phase)
 64      local git_ctx=$(get_git_context)
 65  
 66      cat <<EOF
 67  # TIER 1: Static Core
 68  Project: ECHO (Executive Coordination & Hierarchical Organization)
 69  Type: Multi-agent AI organizational model (9 autonomous agents)
 70  Tech Stack: Elixir/OTP 27, PostgreSQL 16, Redis 7, MCP Protocol, Ollama LLMs
 71  Architecture: 9 MCP servers → PostgreSQL (state) + Redis (message bus) + Ollama (inference)
 72  
 73  Current Phase: $phase
 74  Git Context: $git_ctx
 75  
 76  Agents: CEO (qwen2.5:14b), CTO (deepseek-coder:33b), CHRO (llama3.1:8b),
 77          Operations (mistral:7b), PM (llama3.1:8b), Architect (deepseek-coder:33b),
 78          UI/UX (llama3.2-vision:11b), Developer (deepseek-coder:6.7b), Test (codellama:13b)
 79  
 80  Critical Rules:
 81  1. Never break existing tests (mix test must pass)
 82  2. All agent communication via Redis pub/sub + PostgreSQL
 83  3. Compile shared library first (cd shared && mix compile)
 84  4. Never drop database without permission
 85  5. Keep implementations simple - don't overengineer
 86  
 87  Decision Modes: Autonomous, Collaborative, Hierarchical, Human-in-the-Loop
 88  EOF
 89  }
 90  
 91  # Build TIER 2: Dynamic Task Context (~1500 tokens)
 92  # Parameters: context_type, relevant_files, code_snippets, etc.
 93  build_tier2_dynamic_context() {
 94      local context_type="$1"
 95      shift
 96  
 97      case "$context_type" in
 98          "code_review")
 99              build_tier2_code_review "$@"
100              ;;
101          "feature_design")
102              build_tier2_feature_design "$@"
103              ;;
104          "debugging")
105              build_tier2_debugging "$@"
106              ;;
107          "architecture")
108              build_tier2_architecture "$@"
109              ;;
110          *)
111              build_tier2_general "$@"
112              ;;
113      esac
114  }
115  
# Emit the code-review task context on stdout.
# Arguments: $1 - file path, $2 - line range (optional), $3 - code snippet (optional)
build_tier2_code_review() {
    local target_file="${1:-}"
    local target_lines="${2:-}"
    local snippet="${3:-}"

    # "path" or "path:range" when a range was supplied.
    local location="$target_file${target_lines:+:$target_lines}"

    cat <<EOF
# TIER 2: Code Review Context
File: $location
${snippet:+Code:
\`\`\`
$snippet
\`\`\`}

Database Schema: decisions (mode, status, consensus), messages (from_role, to_role, thread_id),
                 memories (key, value, tags), decision_votes, agent_status
Redis Channels: messages:{role}, messages:all, messages:leadership, decisions:*
EOF
}
134  
# Emit the feature-design task context on stdout.
# Arguments: $1 - feature name, $2 - related modules (optional)
build_tier2_feature_design() {
    local feature="${1:-}"
    local modules="${2:-}"

    cat <<EOF
# TIER 2: Feature Design Context
Feature: $feature
${modules:+Related Modules: $modules}

Database Schema: decisions, messages, memories, decision_votes, agent_status
Workflow Engine: EchoShared.WorkflowEngine
Message Bus: EchoShared.MessageBus (Redis pub/sub + PostgreSQL persistence)
MCP Server: EchoShared.MCP.Server behavior (JSON-RPC 2.0 over stdio)
EOF
}
150  
# Emit the debugging task context on stdout.
# Arguments: $1 - error message, $2 - stack trace, $3 - recent changes (all optional)
build_tier2_debugging() {
    local err_msg="${1:-}"
    local trace="${2:-}"
    local changes="${3:-}"

    cat <<EOF
# TIER 2: Debugging Context
${err_msg:+Error: $err_msg}
${trace:+Stack Trace:
\`\`\`
$trace
\`\`\`}
${changes:+Recent Changes: $changes}

Infrastructure: PostgreSQL (port 5433), Redis (port 6383), Ollama (port 11434)
Common Issues: Shared library not compiled, DB/Redis not running, MCP stdio exit on close
EOF
}
169  
# Emit the architecture task context on stdout.
# Arguments: $1 - component under discussion, $2 - extra context (optional)
build_tier2_architecture() {
    local subject="${1:-}"
    local details="${2:-}"

    cat <<EOF
# TIER 2: Architecture Context
Component: $subject
${details:+Context: $details}

Current Architecture:
- Each agent is independent MCP server (stdio mode)
- Agents communicate via Redis pub/sub channels
- All messages persist to PostgreSQL for audit trail
- Each agent has specialized Ollama model for reasoning
- Phoenix LiveView dashboard for monitoring

Design Patterns:
- GenServer for agent processes
- Ecto for database (PostgreSQL)
- Redix for Redis pub/sub
- Jason for JSON encoding/decoding
EOF
}
193  
# Emit the general-purpose task context on stdout.
# Arguments: $1 - free-form task description (optional)
build_tier2_general() {
    local task_note="${1:-}"

    cat <<EOF
# TIER 2: Task Context
${task_note:+$task_note}

Key Directories:
- agents/{role}/ - Individual agent implementations
- shared/ - Shared Elixir library (EchoShared.*)
- workflows/ - Multi-agent workflow patterns
- monitor/ - Phoenix LiveView dashboard
EOF
}
208  
209  # Build TIER 3: Conversation State (~300 tokens)
# Build TIER 3: Conversation State (~300 tokens).
# Arguments: $1 - user goal (defaults to a generic goal),
#            $2 - recent context (optional), $3 - decisions made (optional)
build_tier3_conversation() {
    local goal="${1:-Analyze and improve ECHO}"
    local recent="${2:-}"
    local decisions="${3:-}"

    cat <<EOF

# TIER 3: Conversation State
User Goal: $goal
${recent:+Recent Context: $recent}
${decisions:+Decisions Made: $decisions}
EOF
}
223  
224  # Build TIER 4: Specific Question (~200 tokens)
# Build TIER 4: Specific Question (~200 tokens).
# Arguments: $1 - the question text (required)
build_tier4_question() {
    local query_text="$1"

    cat <<EOF

# TIER 4: Specific Question
$query_text

Provide a technical, specific answer based on the context above.
EOF
}
236  
237  # Query Ollama with built context
# Query Ollama with a fully built prompt.
# Arguments: $1 - prompt text, $2 - model (default $DEFAULT_MODEL),
#            $3 - timeout in ms (default $TIMEOUT)
# Outputs: the model's response text on stdout; aborts via error() on failure.
query_ollama() {
    local prompt="$1"
    local model="${2:-$DEFAULT_MODEL}"
    local timeout_ms="${3:-$TIMEOUT}"

    # Convert ms -> s for curl -m; clamp to >= 1 because integer division of
    # a sub-second timeout yields 0, which curl treats as "no timeout at all".
    local timeout_s=$(( timeout_ms / 1000 ))
    (( timeout_s >= 1 )) || timeout_s=1

    info "Querying $model (timeout: ${timeout_ms}ms)..."

    # Build the JSON body with jq so the prompt is safely escaped.
    local payload
    payload=$(jq -n --arg model "$model" --arg prompt "$prompt" \
        '{model: $model, prompt: $prompt, stream: false}') \
        || error "Failed to encode request payload"

    local raw
    raw=$(curl -s -m "$timeout_s" "$OLLAMA_ENDPOINT/api/generate" -d "$payload") \
        || error "Failed to reach Ollama at $OLLAMA_ENDPOINT"

    # `.response // empty` yields an empty string (not the literal "null")
    # when the field is missing/null, so the emptiness check below works.
    local response
    response=$(jq -r '.response // empty' <<<"$raw" 2>/dev/null) || response=""

    if [[ -z "$response" ]]; then
        error "Failed to get response from Ollama"
    fi

    printf '%s\n' "$response"
}
259  
260  # Main query function with template support
# Main query function with template support.
# Arguments: $1 - template type; then --question TEXT (required),
#            --goal TEXT (optional), --model MODEL (optional); any other
#            args are passed positionally to the tier-2 builder.
# Outputs: model response on stdout.
query_with_template() {
    local template_type="$1"
    shift

    # Extract parameters
    local question=""
    local user_goal=""
    local model=""
    local tier2_args=()

    while [[ $# -gt 0 ]]; do
        case "$1" in
            --question)
                [[ $# -ge 2 ]] || error "--question requires a value"
                question="$2"
                shift 2
                ;;
            --goal)
                [[ $# -ge 2 ]] || error "--goal requires a value"
                user_goal="$2"
                shift 2
                ;;
            --model)
                [[ $# -ge 2 ]] || error "--model requires a value"
                model="$2"
                shift 2
                ;;
            *)
                tier2_args+=("$1")
                shift
                ;;
        esac
    done

    [[ -n "$question" ]] || error "Question is required (--question)"

    # Build complete context. Declarations are split from assignments so a
    # failing builder is not masked by `local`'s exit status under set -e.
    local tier1 tier2 tier3 tier4
    tier1=$(build_tier1_static_core)
    # ${arr[@]+...} avoids an unbound-variable error under `set -u` when the
    # array is empty on bash < 4.4.
    tier2=$(build_tier2_dynamic_context "$template_type" ${tier2_args[@]+"${tier2_args[@]}"})
    tier3=$(build_tier3_conversation "$user_goal")
    tier4=$(build_tier4_question "$question")

    local full_prompt="$tier1

$tier2
$tier3
$tier4"

    # Query (empty model falls back to the configured default)
    query_ollama "$full_prompt" "${model:-$DEFAULT_MODEL}"
}
308  
309  # Usage help
usage() {
    # Print the full help text to stdout. $0 and $DEFAULT_MODEL are expanded;
    # the \$(cat ...) sequences in the examples are escaped so they render
    # literally for the user to copy.
    cat <<EOF
Usage: $0 <template_type> [options]

Template Types:
  code_review       - Review code for bugs/performance/security
  feature_design    - Design new feature implementation
  debugging         - Debug errors and issues
  architecture      - Architectural design questions
  general           - General technical questions

Options:
  --question TEXT   - The specific question (required)
  --goal TEXT       - User's overall goal (optional)
  --model MODEL     - Ollama model to use (default: $DEFAULT_MODEL)

Examples:
  # Code review
  $0 code_review --question "Review this for security issues" \\
     "shared/lib/message_bus.ex" "1-50" "\$(cat shared/lib/message_bus.ex)"

  # Feature design
  $0 feature_design --question "How to implement feature X?" \\
     "Feature X" "EchoShared.MessageBus, EchoShared.WorkflowEngine"

  # Debugging
  $0 debugging --question "Why is this failing?" \\
     "Connection refused" "\$(cat error.log)" "Added Redis auth"

  # Architecture
  $0 architecture --question "Should we use GenServer or Agent?" \\
     "State management" "Need concurrent access to shared state"

  # General
  $0 general --question "What's the best approach?" \\
     "Need to implement workflow orchestration"
EOF
    # Exit non-zero: usage is shown on argument errors, not only on request.
    exit 1
}
349  
350  # Main entry point
# Entry point: require at least the template type, then delegate.
main() {
    (( $# >= 1 )) || usage
    query_with_template "$@"
}
358  
# Run main only when executed directly; when sourced (BASH_SOURCE differs
# from $0) the functions are loaded without side effects.
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi