# ooda_close_hook.py
#!/usr/bin/env python3
"""
OODA Loop Close Hook for Claude Code

Prompts for OODA loop completion at session close and exports to Hypercore.

This creates a structured session close that:
1. Summarizes what was observed/discussed
2. Asks for orientation (how it fits)
3. Records decisions made
4. Notes actions taken or planned
5. Ships everything to Hypercore for P2P replication

The hook outputs a prompt that Claude will respond to, creating a
structured close that gets stored.

Usage: Add to Stop hook or call manually with /ooda-close
"""

import json
import sys
import re
import urllib.request
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional

# Local Hypercore bridge daemon; every call to it is best-effort.
DAEMON_URL = "http://localhost:7777"


def fetch_json(method: str, path: str, data: Optional[dict] = None) -> dict:
    """Make an HTTP request to the daemon and return the decoded JSON body.

    Args:
        method: HTTP verb, e.g. "GET" or "POST".
        path: URL path appended to DAEMON_URL.
        data: Optional JSON-serializable payload sent as the request body.
            Checked with ``is not None`` so an explicit empty payload ({})
            is still POSTed as a body.

    Returns:
        The parsed JSON response, or ``{"error": "..."}`` on any failure.
        The hook must never crash the session, so errors are returned,
        not raised.
    """
    try:
        url = f"{DAEMON_URL}{path}"
        if data is not None:
            req = urllib.request.Request(
                url,
                data=json.dumps(data).encode('utf-8'),
                headers={"Content-Type": "application/json"},
                method=method,
            )
        else:
            req = urllib.request.Request(url, method=method)

        with urllib.request.urlopen(req, timeout=5) as response:
            return json.loads(response.read().decode('utf-8'))
    except Exception as e:  # deliberate catch-all: daemon is optional/best-effort
        return {"error": str(e)}


def extract_session_summary(transcript_path: str) -> Dict[str, Any]:
    """Extract a lightweight summary from a JSONL transcript for OODA seeding.

    Scans each transcript line for assistant tool usage, touched file
    paths, hashtag-style topics, and question-like sentences.

    Args:
        transcript_path: Path to a JSON-lines transcript file.

    Returns:
        Dict with "topics", "tools_used", "files_touched", "questions",
        and "decisions" lists, deduped (order-preserving) and truncated.
        A missing or unreadable transcript yields the empty summary.
    """
    summary: Dict[str, Any] = {
        "topics": [],
        "tools_used": [],
        "files_touched": [],
        "questions": [],
        "decisions": [],
    }

    try:
        with open(transcript_path, 'r', encoding='utf-8') as f:
            for line in f:
                try:
                    entry = json.loads(line)
                except json.JSONDecodeError:
                    continue  # tolerate partial or corrupt lines

                # Track tool usage from assistant messages.
                if entry.get("type") == "assistant":
                    msg = entry.get("message", {})
                    content = msg.get("content", [])
                    if isinstance(content, list):
                        for block in content:
                            # Guard: a malformed (non-dict) block must not
                            # abort the whole scan.
                            if not isinstance(block, dict):
                                continue
                            if block.get("type") != "tool_use":
                                continue
                            tool = block.get("name", "")
                            if tool and tool not in summary["tools_used"]:
                                summary["tools_used"].append(tool)

                            # Extract file paths from Read/Edit/Write inputs.
                            inp = block.get("input", {})
                            if isinstance(inp, dict) and "file_path" in inp:
                                fp = inp["file_path"]
                                if fp not in summary["files_touched"]:
                                    summary["files_touched"].append(fp)

                # Extract topics (hashtags) and questions from the raw
                # entry text, regardless of entry type.
                content_str = str(entry)
                summary["topics"].extend(re.findall(r'#(\w+)', content_str))
                questions = re.findall(r'([^.!?]{20,100}\?)', content_str)
                summary["questions"].extend(questions[:3])

    except OSError:
        # Best-effort: an unreadable transcript just yields an empty summary.
        pass

    # Dedupe with dict.fromkeys (order-preserving, so output is
    # deterministic — list(set(...)) would shuffle between runs) and limit.
    summary["topics"] = list(dict.fromkeys(summary["topics"]))[:10]
    summary["tools_used"] = summary["tools_used"][:10]
    summary["files_touched"] = summary["files_touched"][:10]
    summary["questions"] = list(dict.fromkeys(summary["questions"]))[:5]

    return summary


def generate_ooda_prompt(session_id: str, summary: Dict[str, Any]) -> str:
    """Generate the OODA loop completion prompt shown at session close.

    Args:
        session_id: Identifier embedded in the prompt for traceability.
        summary: Output of extract_session_summary (all keys optional).

    Returns:
        A markdown prompt asking for Observe/Orient/Decide/Act answers.
    """

    topics_str = ", ".join(summary.get("topics", [])[:5]) or "various topics"
    files_str = "\n".join(f"  - `{f}`" for f in summary.get("files_touched", [])[:5])
    tools_str = ", ".join(summary.get("tools_used", [])[:5]) or "various tools"

    prompt = f"""
---

## Session Close: OODA Loop

Before we end, let's capture the cognitive state for handoff.

**Session ID:** `{session_id}`
**Topics touched:** {topics_str}
**Tools used:** {tools_str}

{f"**Files modified:**{chr(10)}{files_str}" if files_str else ""}

Please complete this OODA loop:

### Observe
What did we explore or discover in this session?
(Key observations, findings, or information gathered)

### Orient
How does this fit into the larger picture?
(Connections to other work, implications, context)

### Decide
What decisions were made or need to be made?
(Choices, directions, trade-offs)

### Act
What actions were taken or should be taken next?
(Completed work, next steps, handoff items)

### Seeding State
How are you feeling about this work? What should the next session know?
(Energy level, satisfaction, concerns, momentum)

---

*This will be exported to Hypercore for cross-session continuity.*
"""
    return prompt


def store_ooda_to_hypercore(
    session_id: str,
    ooda_data: Dict[str, Any],
    summary: Dict[str, Any]
) -> bool:
    """Store OODA loop data to Hypercore via the local daemon.

    Ships three things: a Phoenix state snapshot, an "ooda_close" event,
    and one topic-sync call per topic. All calls are best-effort.

    Args:
        session_id: Session identifier used as the Hypercore key.
        ooda_data: The completed OODA loop answers.
        summary: Output of extract_session_summary.

    Returns:
        True if the Phoenix state write reported success; event/topic
        writes do not affect the return value.
    """

    # Store as Phoenix state.
    # NOTE(review): operator/domain/altitude values are hard-coded here —
    # presumably deployment-specific defaults; confirm before reuse.
    phoenix_state = {
        "session_id": session_id,
        "created": datetime.now().isoformat(),
        "operator": "rick",
        "domain": "Estate",
        "operator_altitude": "tactical",
        "system_altitude": "tactical",
        "pull_rate": 0.5,
        "gravity_wells": [
            {"concept": t, "resonance": 0.7}
            for t in summary.get("topics", [])[:10]
        ],
        "ooda_loop": ooda_data,
        "open_threads": [
            {"content": q, "importance": 0.6}
            for q in summary.get("questions", [])[:5]
        ],
        "files_touched": summary.get("files_touched", []),
        "tools_used": summary.get("tools_used", []),
    }

    result = fetch_json("POST", "/phoenix", {
        "sessionId": session_id,
        "state": phoenix_state
    })

    # Also store as an event (fire-and-forget).
    fetch_json("POST", "/event", {
        "type": "ooda_close",
        "sessionId": session_id,
        "topics": summary.get("topics", []),
        "ooda": ooda_data
    })

    # Sync topics individually.
    for topic in summary.get("topics", []):
        fetch_json("POST", "/topic", {
            "topic": topic,
            "sessionId": session_id
        })

    return result.get("success", False)


def main() -> None:
    """Main hook entry point: read hook input, emit the OODA prompt."""
    # Read hook input from stdin. Narrowed from a bare `except:` so
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        input_data = json.load(sys.stdin)
    except (ValueError, OSError):
        input_data = {}

    session_id = input_data.get(
        "session_id",
        f"session-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
    )
    transcript_path = input_data.get("transcript_path", "")

    # Extract session summary (empty if there is no transcript).
    summary: Dict[str, Any] = {}
    if transcript_path and Path(transcript_path).exists():
        summary = extract_session_summary(transcript_path)

    # Generate the OODA prompt.
    prompt = generate_ooda_prompt(session_id, summary)

    # Output the prompt - this will be shown to the user/Claude.
    print(prompt)

    # Store initial state to Hypercore (best-effort).
    fetch_json("POST", "/event", {
        "type": "ooda_prompted",
        "sessionId": session_id,
        "summary": summary
    })


if __name__ == "__main__":
    main()