/ server / sovereign_server.py
sovereign_server.py
   1  #!/usr/bin/env python3
   2  """
   3  Sovereign OS Server
   4  
   5  The central server for all Sovereign OS services:
   6  
   7  1. Attention Daemon - Cross-session tracking, coherence risk, aha detection
   8  2. Overcast Webhook - Receives podcast OPML from iOS
   9  3. Attention API - Query attention state, stream status
  10  4. Health monitoring
  11  
  12  Run locally:
  13      python server/sovereign_server.py
  14  
  15  Run in production (with gunicorn):
  16      gunicorn -w 1 -b 0.0.0.0:5050 server.sovereign_server:app
  17  
  18  Deploy to cloud:
  19      - Fly.io: fly launch
  20      - Railway: railway up
  21      - Render: push to git
  22  """
  23  
  24  import asyncio
  25  import json
  26  import sys
  27  import threading
  28  from datetime import datetime
  29  from pathlib import Path
  30  from flask import Flask, request, jsonify, Response
  31  import xml.etree.ElementTree as ET
  32  
  33  # Add parent to path
  34  sys.path.insert(0, str(Path(__file__).parent.parent))
  35  
  36  from core.attention.daemon import AttentionDaemon
  37  from core.attention.cross_session import create_cross_session_system
  38  from core.attention.coherence_risk import create_coherence_system
  39  from core.attention.context_stream import create_context_stream
  40  from core.attention.natural_director import NaturalContextDirector, create_natural_director
  41  from core.attention.signal_words import detect_signals
  42  from core.attention.transcript_miner import TranscriptMiner
  43  from core.attention.ingestion_pipeline import IngestionPipeline, create_pipeline
  44  from core.attention.multi_source_correlator import MultiSourceCorrelator, correlate_sources
  45  from core.attention.artifact_analyzer import ArtifactAnalyzer, analyze_artifacts
  46  from core.attention.graph_integration import GraphIntegration, get_graph_integration
  47  from core.attention.unified_correlator import UnifiedCorrelator, get_unified_correlator
  48  from core.attention.voice_router import VoiceRouter, get_voice_router, process_voice_command
  49  from core.attention.visual_capture import VisualCapture, get_visual_capture
  50  from core.attention.video_processor import VideoProcessor, get_video_processor
  51  
# Configuration
PORT = 5050  # HTTP port (matches the gunicorn example in the module docstring)
SESSIONS_DIR = Path.home() / 'repos/Sovereign_Estate/daily/sessions'  # session transcripts read by the daemon/miner
DAILY_DIR = Path.home() / 'repos/Sovereign_Estate/daily'  # daily notes root
OVERCAST_DIR = Path.home() / 'repos/Sovereign_OS/data/overcast'  # presumably where the Overcast webhook stores OPML — confirm against the handler

app = Flask(__name__)

# Global state (initialized on startup).
# All of these stay None until init_daemon() runs; route handlers guard on
# that and return 503 when not yet initialized.
daemon = None              # AttentionDaemon instance
daemon_thread = None       # background thread running the daemon's asyncio loop
natural_director = None    # natural-language context director
ingestion_pipeline = None  # ingestion pipeline (standalone mode)
pipeline_thread = None     # optional background thread for the pipeline (not started by default)
graph_integration = None   # graph integration into Sovereign_Estate data
unified_correlator = None  # correlator across all data sources
voice_router = None        # voice command router
visual_capture = None      # photo/screenshot + OCR ingestion
video_processor = None     # video transcription/keyframe processing
  71  
  72  
  73  def run_daemon_loop():
  74      """Run the attention daemon in its own thread."""
  75      global daemon
  76      loop = asyncio.new_event_loop()
  77      asyncio.set_event_loop(loop)
  78      try:
  79          loop.run_until_complete(daemon.run())
  80      except Exception as e:
  81          print(f"[Daemon] Error: {e}")
  82      finally:
  83          loop.close()
  84  
  85  
  86  def run_pipeline_loop():
  87      """Run the ingestion pipeline in its own thread."""
  88      global ingestion_pipeline
  89      loop = asyncio.new_event_loop()
  90      asyncio.set_event_loop(loop)
  91      try:
  92          loop.run_until_complete(ingestion_pipeline.run_async())
  93      except Exception as e:
  94          print(f"[Pipeline] Error: {e}")
  95      finally:
  96          loop.close()
  97  
  98  
  99  def init_daemon():
 100      """Initialize the attention daemon."""
 101      global daemon, daemon_thread, natural_director, ingestion_pipeline, pipeline_thread
 102  
 103      if daemon is not None:
 104          return  # Already initialized
 105  
 106      print("[Server] Initializing attention daemon...")
 107  
 108      daemon = AttentionDaemon(
 109          sessions_dir=str(SESSIONS_DIR),
 110          daily_notes_dir=str(DAILY_DIR)
 111      )
 112  
 113      # Process initial state
 114      daemon.session_watcher.scan_sessions()
 115      daemon.session_watcher.process_updates()
 116  
 117      # Initialize natural director
 118      natural_director = create_natural_director(
 119          sessions_dir=str(SESSIONS_DIR),
 120          daily_notes_dir=str(DAILY_DIR),
 121          stream=daemon.context_stream
 122      )
 123      print("[Server] Natural context director initialized")
 124  
 125      # Initialize ingestion pipeline (standalone mode for single-machine setup)
 126      ingestion_pipeline = create_pipeline(mode='standalone')
 127      print("[Server] Ingestion pipeline initialized")
 128  
 129      # Initialize graph integration (connects to Sovereign_Estate graph data)
 130      global graph_integration, unified_correlator
 131      graph_integration = get_graph_integration()
 132      print(f"[Server] Graph integration initialized - {graph_integration._state.atom_count} atoms loaded")
 133  
 134      # Initialize unified correlator (connects all data sources)
 135      unified_correlator = get_unified_correlator()
 136      print("[Server] Unified correlator initialized")
 137  
 138      # Initialize voice router, visual capture, video processor
 139      global voice_router, visual_capture, video_processor
 140      voice_router = get_voice_router()
 141      visual_capture = get_visual_capture()
 142      video_processor = get_video_processor()
 143      print("[Server] Voice/visual/video systems initialized")
 144  
 145      # Start daemon in background thread
 146      daemon_thread = threading.Thread(target=run_daemon_loop, daemon=True)
 147      daemon_thread.start()
 148  
 149      # Start pipeline in background thread (optional - can be started on demand)
 150      # pipeline_thread = threading.Thread(target=run_pipeline_loop, daemon=True)
 151      # pipeline_thread.start()
 152  
 153      print("[Server] Attention daemon started")
 154  
 155  
 156  # ==================== Health & Status ====================
 157  
@app.route('/')
def index():
    """Landing page: a static, hand-maintained HTML directory of all endpoints.

    Served as an inline HTML string (no template engine).
    NOTE(review): this list must be kept in sync by hand with the route
    handlers below — there is no automatic generation.
    """
    return """
    <html>
    <head>
        <title>Sovereign OS Server</title>
        <style>
            body { font-family: system-ui; max-width: 800px; margin: 50px auto; padding: 20px; }
            code { background: #f0f0f0; padding: 2px 6px; border-radius: 3px; }
            .status { padding: 10px; border-radius: 5px; margin: 10px 0; }
            .healthy { background: #d4edda; }
            .section { margin: 20px 0; padding: 15px; border: 1px solid #ddd; border-radius: 5px; }
        </style>
    </head>
    <body>
        <h1>Sovereign OS Server</h1>
        <div class="status healthy">✓ Server running</div>

        <div class="section">
            <h2>Attention System</h2>
            <ul>
                <li><a href="/attention/status">GET /attention/status</a> - Current attention state</li>
                <li><a href="/attention/sessions">GET /attention/sessions</a> - Active sessions</li>
                <li><a href="/attention/attractors">GET /attention/attractors</a> - Cross-session attractors</li>
                <li><a href="/attention/coherence">GET /attention/coherence</a> - Coherence risk</li>
                <li><a href="/attention/stream">GET /attention/stream</a> - Context stream state</li>
                <li><a href="/attention/tuning">GET /attention/tuning</a> - Context tuning parameters</li>
                <li><code>POST /attention/engage/&lt;item_id&gt;</code> - Record engagement (feeds tuner)</li>
            </ul>
        </div>

        <div class="section">
            <h2>Natural Context Direction</h2>
            <ul>
                <li><code>POST /context/direct</code> - Natural language context direction</li>
                <li><code>POST /context/signals</code> - Detect signal words in text</li>
                <li><a href="/context/validation">GET /context/validation</a> - Items needing validation</li>
                <li><code>POST /context/validate/&lt;item_id&gt;</code> - Validate or reject an item</li>
            </ul>
            <p><em>Example: POST /context/direct with {"input": "Load the last three days"}</em></p>
        </div>

        <div class="section">
            <h2>Transcript Mining</h2>
            <ul>
                <li><a href="/context/mine">GET /context/mine</a> - Mine transcripts for insights (7 days default)</li>
                <li><a href="/context/mine?hours=72">GET /context/mine?hours=72</a> - Last 3 days</li>
                <li><a href="/context/mine?format=markdown">GET /context/mine?format=markdown</a> - Markdown report</li>
                <li><a href="/context/mine/topics">GET /context/mine/topics</a> - Topic graph for visualization</li>
            </ul>
            <p><em>Extracts: unmined insights, principles, decisions, topic clusters, signal patterns</em></p>
        </div>

        <div class="section">
            <h2>Multi-Source Correlation & Pipeline</h2>
            <ul>
                <li><a href="/correlate">GET /correlate</a> - Run multi-source correlation (Claude + Monologue + Comet)</li>
                <li><a href="/correlate?format=markdown">GET /correlate?format=markdown</a> - Correlation report in markdown</li>
                <li><a href="/correlate/aha">GET /correlate/aha</a> - Stored aha moments</li>
                <li><a href="/correlate/high-confidence">GET /correlate/high-confidence</a> - High-confidence correlated moments</li>
                <li><a href="/pipeline/status">GET /pipeline/status</a> - Pipeline status</li>
                <li><code>POST /pipeline/collect</code> - Trigger event collection</li>
                <li><code>POST /pipeline/process</code> - Trigger queue processing</li>
                <li><code>POST /pipeline/backfill</code> - Backfill historical data</li>
            </ul>
            <p><em>Triangulates insights across voice (Monologue), browser (Comet), and Claude sessions</em></p>
        </div>

        <div class="section">
            <h2>Artifact Analysis</h2>
            <ul>
                <li><a href="/artifacts">GET /artifacts</a> - Analyze artifacts as crystallization markers</li>
                <li><a href="/artifacts?format=markdown">GET /artifacts?format=markdown</a> - Markdown report</li>
                <li><a href="/artifacts/unclosed">GET /artifacts/unclosed</a> - Unclosed OODA loops</li>
            </ul>
            <p><em>Artifacts = crystallization points where thinking became concrete</em></p>
        </div>

        <div class="section">
            <h2>Semantic Graph (from Sovereign_Estate)</h2>
            <ul>
                <li><a href="/graph/status">GET /graph/status</a> - Graph state (atoms, edges, distributions)</li>
                <li><a href="/graph/atoms">GET /graph/atoms</a> - Recent atoms</li>
                <li><a href="/graph/resonance">GET /graph/resonance</a> - High-resonance atoms</li>
                <li><a href="/graph/gravity-wells">GET /graph/gravity-wells</a> - Highly connected nodes</li>
                <li><a href="/graph/search?q=attention">GET /graph/search?q=...</a> - Search atoms</li>
                <li><a href="/graph/stream">GET /graph/stream</a> - SSE stream of new events</li>
                <li><code>POST /graph/reload</code> - Reload graph data</li>
            </ul>
            <p><em>Streams from stream_session.py and historical_processor.py</em></p>
        </div>

        <div class="section">
            <h2>Unified Correlation (All Sources)</h2>
            <ul>
                <li><a href="/unified/report">GET /unified/report</a> - Full correlation across all sources</li>
                <li><a href="/unified/report?format=markdown">GET /unified/report?format=markdown</a> - Markdown report</li>
                <li><a href="/unified/timeline">GET /unified/timeline</a> - Timeline of all events</li>
                <li><a href="/unified/stream">GET /unified/stream</a> - SSE stream of unified events</li>
            </ul>
            <p><em>Unifies: Claude sessions + Monologue + Browser + Graph + Artifacts</em></p>
        </div>

        <div class="section">
            <h2>Voice Commands (Ambient Mode)</h2>
            <ul>
                <li><code>POST /voice/command</code> - Process natural language voice command</li>
                <li><a href="/voice/help">GET /voice/help</a> - Available voice commands</li>
            </ul>
            <p><em>For: AirPods, Apple Watch, Siri Shortcuts</em></p>
        </div>

        <div class="section">
            <h2>Visual Capture</h2>
            <ul>
                <li><code>POST /visual/ingest</code> - Ingest photo/screenshot with OCR</li>
                <li><a href="/visual/recent">GET /visual/recent</a> - Recently captured images</li>
                <li><a href="/visual/search?q=example">GET /visual/search?q=...</a> - Search by OCR text</li>
            </ul>
            <p><em>For: iPhone camera, Meta Glasses, document scans</em></p>
        </div>

        <div class="section">
            <h2>Work Sessions (Technician Mode)</h2>
            <ul>
                <li><code>POST /work/start</code> - Start work session at property</li>
                <li><code>POST /work/end/&lt;session_id&gt;</code> - End session, generate notes</li>
                <li><code>POST /work/note/&lt;session_id&gt;</code> - Add note (issue/resolution/part)</li>
            </ul>
            <p><em>For: Field technicians with smart glasses + narration</em></p>
        </div>

        <div class="section">
            <h2>Video Processing</h2>
            <ul>
                <li><code>POST /video/process</code> - Process video (transcribe + keyframes)</li>
                <li><a href="/video/info">GET /video/&lt;video_id&gt;</a> - Get processed video info</li>
            </ul>
            <p><em>Extracts: transcript, keyframes, OCR, graph atoms</em></p>
        </div>

        <div class="section">
            <h2>Mobile / Shortcuts</h2>
            <ul>
                <li><a href="/mobile/status">GET /mobile/status</a> - Quick status for mobile</li>
                <li><a href="/mobile/brief">GET /mobile/brief</a> - Today's brief (insights + next steps)</li>
                <li><a href="/mobile/aha">GET /mobile/aha</a> - Recent aha moments (clean format)</li>
                <li><code>POST /mobile/capture</code> - Capture a quick thought</li>
            </ul>
            <p><em>Optimized for iOS Shortcuts and mobile displays</em></p>
        </div>

        <div class="section">
            <h2>Overcast Webhook</h2>
            <ul>
                <li><code>POST /overcast</code> - Receive OPML from iOS</li>
                <li><a href="/overcast/latest">GET /overcast/latest</a> - Latest sync info</li>
            </ul>
        </div>

        <div class="section">
            <h2>System</h2>
            <ul>
                <li><a href="/health">GET /health</a> - Health check</li>
            </ul>
        </div>

        <p><em>"Attention is all you need"</em></p>
    </body>
    </html>
    """
 330  
 331  
 332  @app.route('/health')
 333  def health():
 334      """Health check endpoint."""
 335      daemon_status = "running" if daemon_thread and daemon_thread.is_alive() else "stopped"
 336  
 337      return jsonify({
 338          'status': 'healthy',
 339          'service': 'Sovereign OS Server',
 340          'daemon': daemon_status,
 341          'timestamp': datetime.now().isoformat()
 342      })
 343  
 344  
 345  # ==================== Attention API ====================
 346  
 347  @app.route('/attention/status')
 348  def attention_status():
 349      """Get full attention system status."""
 350      if not daemon:
 351          return jsonify({'error': 'Daemon not initialized'}), 503
 352  
 353      state = daemon.cross_session_tracker.get_state()
 354      stream_state = daemon.context_stream.get_state()
 355  
 356      # Clean attractors
 357      attractors = [a for a in state.cross_session_attractors
 358                    if a and len(a) > 2 and '\n' not in a]
 359  
 360      return jsonify({
 361          'sessions': {
 362              'count': len(state.sessions),
 363              'ids': list(state.sessions.keys())[:10]
 364          },
 365          'cross_session': {
 366              'attractors': attractors[:20],
 367              'events': len(state.cross_events),
 368              'focus': state.focus_session
 369          },
 370          'context_stream': {
 371              'items': stream_state.item_count,
 372              'tokens': stream_state.total_tokens,
 373              'pressure': f"{stream_state.pressure:.0%}"
 374          },
 375          'aha': {
 376              'candidates': len(daemon.aha_detector.candidates),
 377              'validated': len(daemon.aha_detector.validated_ahas)
 378          }
 379      })
 380  
 381  
 382  @app.route('/attention/sessions')
 383  def attention_sessions():
 384      """Get active session info."""
 385      if not daemon:
 386          return jsonify({'error': 'Daemon not initialized'}), 503
 387  
 388      state = daemon.cross_session_tracker.get_state()
 389  
 390      sessions = []
 391      for sid, info in state.sessions.items():
 392          sessions.append({
 393              'id': sid,
 394              'source': info.source,
 395              'started': info.started_at.isoformat() if info.started_at else None,
 396              'last_activity': info.last_activity.isoformat() if info.last_activity else None,
 397              'topics': list(info.topics)[:10],
 398              'atom_count': info.atom_count
 399          })
 400  
 401      return jsonify({
 402          'count': len(sessions),
 403          'sessions': sessions
 404      })
 405  
 406  
 407  @app.route('/attention/attractors')
 408  def attention_attractors():
 409      """Get cross-session attractors (hot topics)."""
 410      if not daemon:
 411          return jsonify({'error': 'Daemon not initialized'}), 503
 412  
 413      state = daemon.cross_session_tracker.get_state()
 414  
 415      # Clean and enrich attractors
 416      attractors = []
 417      for topic in state.cross_session_attractors:
 418          if not topic or len(topic) < 3 or '\n' in topic:
 419              continue
 420  
 421          sessions = state.active_topics.get(topic, [])
 422          attractors.append({
 423              'topic': topic,
 424              'sessions': sessions,
 425              'session_count': len(sessions)
 426          })
 427  
 428      # Sort by session count
 429      attractors.sort(key=lambda x: x['session_count'], reverse=True)
 430  
 431      return jsonify({
 432          'count': len(attractors),
 433          'attractors': attractors[:30]
 434      })
 435  
 436  
 437  @app.route('/attention/coherence')
 438  def attention_coherence():
 439      """Get coherence risk assessment."""
 440      if not daemon:
 441          return jsonify({'error': 'Daemon not initialized'}), 503
 442  
 443      high_risk = daemon.coherence_detector.get_high_risk_topics(threshold=0.3)
 444  
 445      risks = []
 446      for topic, score, sessions in high_risk:
 447          risks.append({
 448              'topic': topic,
 449              'risk_score': f"{score:.0%}",
 450              'sessions': sessions
 451          })
 452  
 453      return jsonify({
 454          'count': len(risks),
 455          'high_risk_topics': risks[:20]
 456      })
 457  
 458  
 459  @app.route('/attention/stream')
 460  def attention_stream():
 461      """Get context stream state."""
 462      if not daemon:
 463          return jsonify({'error': 'Daemon not initialized'}), 503
 464  
 465      state = daemon.context_stream.get_state()
 466      items = daemon.context_stream.get_context(max_tokens=10000)
 467  
 468      return jsonify({
 469          'state': {
 470              'total_tokens': state.total_tokens,
 471              'max_tokens': state.max_tokens,
 472              'pressure': f"{state.pressure:.0%}",
 473              'item_count': state.item_count,
 474              'avg_attention': f"{state.avg_attention:.2f}"
 475          },
 476          'top_items': [
 477              {
 478                  'id': item.id[:20],
 479                  'attention': f"{item.attention_score:.2f}",
 480                  'priority': item.priority.value,
 481                  'source': item.source
 482              }
 483              for item in items[:10]
 484          ]
 485      })
 486  
 487  
 488  @app.route('/attention/inject/<session_id>', methods=['POST'])
 489  def attention_inject(session_id):
 490      """
 491      Get cross-session awareness to inject into a session.
 492  
 493      Called by Claude Code hooks to get context about other sessions.
 494      """
 495      if not daemon:
 496          return jsonify({'error': 'Daemon not initialized'}), 503
 497  
 498      state = daemon.cross_session_tracker.get_state()
 499  
 500      # Get attractors relevant to this session
 501      attractors = [a for a in state.cross_session_attractors
 502                    if a and len(a) > 2 and '\n' not in a]
 503  
 504      # Get other active sessions
 505      other_sessions = [s for s in state.sessions.keys() if s != session_id]
 506  
 507      # Check coherence risk
 508      high_risk = daemon.coherence_detector.get_high_risk_topics(threshold=0.5)
 509      relevant_risks = [
 510          {'topic': t, 'score': s}
 511          for t, s, sessions in high_risk
 512          if session_id in sessions
 513      ]
 514  
 515      # Build injection message
 516      if attractors or relevant_risks:
 517          message = f"<cross-session-awareness>\n"
 518          if attractors:
 519              message += f"Hot topics across {len(other_sessions)} other sessions: {', '.join(attractors[:5])}\n"
 520          if relevant_risks:
 521              message += f"Coherence risk: {', '.join(r['topic'] for r in relevant_risks[:3])}\n"
 522          message += "</cross-session-awareness>"
 523      else:
 524          message = None
 525  
 526      return jsonify({
 527          'session_id': session_id,
 528          'other_sessions': len(other_sessions),
 529          'attractors': attractors[:10],
 530          'coherence_risks': relevant_risks[:5],
 531          'injection_message': message
 532      })
 533  
 534  
 535  # ==================== Context Tuning API ====================
 536  
 537  @app.route('/attention/tuning')
 538  def attention_tuning():
 539      """Get current context stream tuning parameters."""
 540      if not daemon:
 541          return jsonify({'error': 'Daemon not initialized'}), 503
 542  
 543      report = daemon.get_tuning_report()
 544      return jsonify(report)
 545  
 546  
 547  @app.route('/attention/engage/<item_id>', methods=['POST'])
 548  def attention_engage(item_id):
 549      """
 550      Record operator engagement with a context item.
 551  
 552      Body (JSON):
 553          engagement_type: 'reference' | 'expand' | 'action' | 'ignore' | 'dismiss' | 'revive'
 554          context: optional dict with additional context
 555  
 556      This feeds the tuner to learn operator preferences.
 557      """
 558      if not daemon:
 559          return jsonify({'error': 'Daemon not initialized'}), 503
 560  
 561      data = request.get_json() or {}
 562      engagement_type = data.get('engagement_type', 'reference')
 563      context = data.get('context')
 564  
 565      daemon.record_engagement(item_id, engagement_type, context)
 566  
 567      return jsonify({
 568          'status': 'ok',
 569          'item_id': item_id,
 570          'engagement_type': engagement_type
 571      })
 572  
 573  
 574  # ==================== Natural Context Direction ====================
 575  
 576  @app.route('/context/direct', methods=['POST'])
 577  def context_direct():
 578      """
 579      Natural language context direction.
 580  
 581      Body (JSON):
 582          input: "Load the last three days" or "Remind me what I was working on"
 583  
 584      Supports multi-step refinement:
 585          1. "Remind me what I was working on" → summary + options
 586          2. "Focus on the authentication work" → narrowed
 587          3. "Load that" → inject into context
 588  
 589      Returns conversational response with suggestions for next steps.
 590      """
 591      if not natural_director:
 592          return jsonify({'error': 'Director not initialized'}), 503
 593  
 594      data = request.get_json() or {}
 595      input_text = data.get('input', '')
 596  
 597      if not input_text:
 598          return jsonify({
 599              'error': 'No input provided',
 600              'hint': 'Try: "Remind me what I was working on" or "Load the last three days"'
 601          }), 400
 602  
 603      response = natural_director.process(input_text)
 604  
 605      return jsonify({
 606          'message': response.message,
 607          'sessions_found': response.sessions_found,
 608          'topics': response.topics_found,
 609          'decisions': response.decisions_found,
 610          'suggestions': response.suggestions,
 611          'items_loaded': response.items_loaded,
 612          'tokens_loaded': response.tokens_loaded,
 613          'can_refine': response.can_refine,
 614          'refinement_options': response.refinement_options
 615      })
 616  
 617  
 618  @app.route('/context/signals', methods=['POST'])
 619  def context_signals():
 620      """
 621      Detect signal words in text and return weight/persistence info.
 622  
 623      Body (JSON):
 624          text: "Remember this core principle: attention is all you need"
 625  
 626      Returns signal analysis including weight modifier and suggested tags.
 627      """
 628      data = request.get_json() or {}
 629      text = data.get('text', '')
 630  
 631      if not text:
 632          return jsonify({'error': 'No text provided'}), 400
 633  
 634      detection = detect_signals(text)
 635  
 636      return jsonify(detection.to_dict())
 637  
 638  
 639  @app.route('/context/validation', methods=['GET'])
 640  def context_validation():
 641      """Get items that need validation ('what do you think?' items)."""
 642      if not daemon:
 643          return jsonify({'error': 'Daemon not initialized'}), 503
 644  
 645      items = daemon.context_stream.get_items_needing_validation()
 646  
 647      return jsonify({
 648          'count': len(items),
 649          'items': [
 650              {
 651                  'id': item.id,
 652                  'content': item.content[:200],
 653                  'attention': f"{item.attention_score:.2f}",
 654                  'tags': list(item.signal_tags)
 655              }
 656              for item in items
 657          ]
 658      })
 659  
 660  
 661  @app.route('/context/validate/<item_id>', methods=['POST'])
 662  def context_validate(item_id):
 663      """
 664      Validate or reject an item that was seeking validation.
 665  
 666      Body (JSON):
 667          validated: true or false
 668      """
 669      if not daemon:
 670          return jsonify({'error': 'Daemon not initialized'}), 503
 671  
 672      data = request.get_json() or {}
 673      validated = data.get('validated', True)
 674  
 675      success = daemon.context_stream.validate_item(item_id, validated)
 676  
 677      if success:
 678          return jsonify({
 679              'status': 'ok',
 680              'item_id': item_id,
 681              'validated': validated
 682          })
 683      else:
 684          return jsonify({'error': 'Item not found'}), 404
 685  
 686  
 687  # ==================== Transcript Mining ====================
 688  
 689  @app.route('/context/mine', methods=['GET', 'POST'])
 690  def context_mine():
 691      """
 692      Mine transcripts for insights, patterns, and unmined content.
 693  
 694      GET parameters or POST body (JSON):
 695          hours: Hours to look back (default 168 = 7 days)
 696          speaker: Filter by speaker ('Rick', 'Claude', or null for all)
 697          min_weight: Minimum weight threshold (default 0.0)
 698          format: 'json' or 'markdown' (default 'json')
 699  
 700      Returns:
 701          - unmined_insights: High-weight items not yet integrated
 702          - principles: Identified principles
 703          - decisions: Decisions made
 704          - topic_clusters: Topics with related items
 705          - signal_patterns: Your common signal word usage
 706          - validation_seeking_rate: How often you ask "what do you think?"
 707      """
 708      if request.method == 'POST':
 709          data = request.get_json() or {}
 710      else:
 711          data = request.args
 712  
 713      hours = float(data.get('hours', 168))
 714      speaker = data.get('speaker')
 715      min_weight = float(data.get('min_weight', 0.0))
 716      output_format = data.get('format', 'json')
 717  
 718      # Create miner and run
 719      miner = TranscriptMiner(str(SESSIONS_DIR))
 720      report = miner.mine(
 721          hours_back=hours,
 722          speaker_filter=speaker,
 723          min_weight=min_weight
 724      )
 725  
 726      if output_format == 'markdown':
 727          return report.to_markdown(), 200, {'Content-Type': 'text/markdown'}
 728  
 729      # JSON format
 730      return jsonify({
 731          'summary': {
 732              'sessions_analyzed': report.sessions_analyzed,
 733              'atoms_processed': report.atoms_processed,
 734              'time_window_hours': report.time_window_hours,
 735              'validation_seeking_rate': f"{report.validation_seeking_rate:.1%}"
 736          },
 737          'unmined_insights': [
 738              {
 739                  'content': atom.content[:300],
 740                  'weight': atom.weight,
 741                  'tags': list(atom.tags),
 742                  'session': atom.session_id,
 743                  'speaker': atom.speaker
 744              }
 745              for atom in report.unmined_insights[:20]
 746          ],
 747          'principles': [
 748              {
 749                  'content': atom.content[:200],
 750                  'weight': atom.weight,
 751                  'session': atom.session_id
 752              }
 753              for atom in report.principles[:15]
 754          ],
 755          'decisions': [
 756              {
 757                  'content': atom.content[:150],
 758                  'weight': atom.weight,
 759                  'session': atom.session_id
 760              }
 761              for atom in report.decisions[:15]
 762          ],
 763          'topic_clusters': [
 764              {
 765                  'topic': cluster.topic,
 766                  'total_weight': cluster.total_weight,
 767                  'insight_count': cluster.insight_count,
 768                  'decision_count': cluster.decision_count,
 769                  'principle_count': cluster.principle_count,
 770                  'related_topics': dict(sorted(
 771                      cluster.related_topics.items(),
 772                      key=lambda x: -x[1]
 773                  )[:5])
 774              }
 775              for cluster in sorted(
 776                  report.topic_clusters.values(),
 777                  key=lambda c: c.total_weight,
 778                  reverse=True
 779              )[:20]
 780          ],
 781          'signal_patterns': [
 782              {'signal': signal, 'count': count}
 783              for signal, count in report.your_common_signals[:20]
 784          ]
 785      })
 786  
 787  
 788  @app.route('/context/mine/topics', methods=['GET'])
 789  def context_mine_topics():
 790      """
 791      Get topic map for visualization.
 792  
 793      Returns nodes and edges suitable for graph visualization.
 794      """
 795      hours = float(request.args.get('hours', 168))
 796  
 797      miner = TranscriptMiner(str(SESSIONS_DIR))
 798      miner.mine(hours_back=hours)
 799  
 800      # Build graph data
 801      nodes = []
 802      edges = []
 803  
 804      for topic, cluster in miner._topic_clusters.items():
 805          nodes.append({
 806              'id': topic,
 807              'weight': cluster.total_weight,
 808              'insights': cluster.insight_count,
 809              'decisions': cluster.decision_count,
 810              'principles': cluster.principle_count
 811          })
 812  
 813          for related, count in cluster.related_topics.items():
 814              if count > 1:  # Only significant connections
 815                  edges.append({
 816                      'source': topic,
 817                      'target': related,
 818                      'weight': count
 819                  })
 820  
 821      return jsonify({
 822          'nodes': nodes,
 823          'edges': edges,
 824          'node_count': len(nodes),
 825          'edge_count': len(edges)
 826      })
 827  
 828  
 829  # ==================== Multi-Source Correlation & Pipeline ====================
 830  
 831  @app.route('/pipeline/status')
 832  def pipeline_status():
 833      """Get ingestion pipeline status."""
 834      if not ingestion_pipeline:
 835          return jsonify({'error': 'Pipeline not initialized'}), 503
 836  
 837      return jsonify(ingestion_pipeline.get_state())
 838  
 839  
 840  @app.route('/pipeline/collect', methods=['POST'])
 841  def pipeline_collect():
 842      """Trigger manual collection of new events."""
 843      if not ingestion_pipeline:
 844          return jsonify({'error': 'Pipeline not initialized'}), 503
 845  
 846      collected = ingestion_pipeline.collect_new_events()
 847      return jsonify({
 848          'status': 'ok',
 849          'events_collected': collected
 850      })
 851  
 852  
 853  @app.route('/pipeline/process', methods=['POST'])
 854  def pipeline_process():
 855      """Trigger manual processing of queued events."""
 856      if not ingestion_pipeline:
 857          return jsonify({'error': 'Pipeline not initialized'}), 503
 858  
 859      processed = ingestion_pipeline.process_queue()
 860      return jsonify({
 861          'status': 'ok',
 862          'events_processed': processed
 863      })
 864  
 865  
 866  @app.route('/pipeline/backfill', methods=['POST'])
 867  def pipeline_backfill():
 868      """
 869      Backfill historical data from all sources.
 870  
 871      Body (JSON):
 872          hours: Hours to look back (default 168 = 7 days)
 873      """
 874      if not ingestion_pipeline:
 875          return jsonify({'error': 'Pipeline not initialized'}), 503
 876  
 877      data = request.get_json() or {}
 878      hours = float(data.get('hours', 168))
 879  
 880      result = ingestion_pipeline.backfill(hours_back=hours)
 881      return jsonify(result)
 882  
 883  
 884  @app.route('/correlate', methods=['GET', 'POST'])
 885  def correlate():
 886      """
 887      Run multi-source correlation.
 888  
 889      GET parameters or POST body:
 890          hours: Hours to look back (default 168)
 891          format: 'json' or 'markdown' (default 'json')
 892  
 893      Correlates across:
 894          - Claude sessions
 895          - Monologue voice transcripts
 896          - Comet browser history
 897  
 898      Returns aha moments, validation-seeking, research-corroborated insights.
 899      """
 900      if request.method == 'POST':
 901          data = request.get_json() or {}
 902      else:
 903          data = request.args
 904  
 905      hours = float(data.get('hours', 168))
 906      output_format = data.get('format', 'json')
 907  
 908      report = correlate_sources(hours_back=hours)
 909  
 910      if output_format == 'markdown':
 911          return report.to_markdown(), 200, {'Content-Type': 'text/markdown'}
 912  
 913      return jsonify({
 914          'summary': {
 915              'time_window_hours': report.time_window_hours,
 916              'total_events': report.total_events,
 917              'multi_source_moments': report.multi_source_moments,
 918              'sources': report.sources_found
 919          },
 920          'aha_moments': [m.to_dict() for m in report.aha_moments[:15]],
 921          'validation_seeking': [m.to_dict() for m in report.validation_seeking[:15]],
 922          'research_corroborated': [m.to_dict() for m in report.research_corroborated[:15]],
 923          'topic_research_map': {k: v[:5] for k, v in list(report.topic_research_map.items())[:10]}
 924      })
 925  
 926  
 927  @app.route('/correlate/aha', methods=['GET'])
 928  def correlate_aha():
 929      """Get stored aha moments from the pipeline database."""
 930      if not ingestion_pipeline:
 931          return jsonify({'error': 'Pipeline not initialized'}), 503
 932  
 933      limit = int(request.args.get('limit', 20))
 934      moments = ingestion_pipeline.get_aha_moments(limit=limit)
 935  
 936      return jsonify({
 937          'count': len(moments),
 938          'aha_moments': moments
 939      })
 940  
 941  
 942  @app.route('/correlate/high-confidence', methods=['GET'])
 943  def correlate_high_confidence():
 944      """Get high-confidence correlated moments."""
 945      if not ingestion_pipeline:
 946          return jsonify({'error': 'Pipeline not initialized'}), 503
 947  
 948      min_confidence = float(request.args.get('min_confidence', 0.5))
 949      limit = int(request.args.get('limit', 50))
 950  
 951      moments = ingestion_pipeline.get_high_confidence_moments(
 952          min_confidence=min_confidence,
 953          limit=limit
 954      )
 955  
 956      return jsonify({
 957          'count': len(moments),
 958          'min_confidence': min_confidence,
 959          'moments': moments
 960      })
 961  
 962  
 963  # ==================== Artifact Analysis ====================
 964  
 965  @app.route('/artifacts', methods=['GET'])
 966  def artifacts():
 967      """
 968      Analyze Claude.ai artifacts as crystallization markers.
 969  
 970      GET parameters:
 971          hours: Hours to look back (default 336 = 2 weeks)
 972          format: 'json' or 'markdown' (default 'json')
 973  
 974      Artifacts represent moments where thinking crystallized into
 975      something concrete - they're high-weight signal markers.
 976      """
 977      hours = float(request.args.get('hours', 336))
 978      output_format = request.args.get('format', 'json')
 979  
 980      report = analyze_artifacts(hours_back=hours)
 981  
 982      if output_format == 'markdown':
 983          return report.to_markdown(), 200, {'Content-Type': 'text/markdown'}
 984  
 985      return jsonify({
 986          'summary': {
 987              'artifacts_analyzed': report.artifacts_analyzed,
 988              'time_window_hours': report.time_window_hours,
 989              'unclosed_loops': len(report.unclosed_loops),
 990              'artifact_types': report.artifact_types
 991          },
 992          'crystallization_moments': [
 993              {
 994                  'name': cm.artifact.conversation_name,
 995                  'timestamp': cm.timestamp.isoformat(),
 996                  'duration_hours': cm.artifact.duration_hours,
 997                  'weight': cm.artifact.weight,
 998                  'type': cm.artifact.artifact_type,
 999                  'topics': list(cm.topics)[:10]
1000              }
1001              for cm in report.crystallization_moments[:15]
1002          ],
1003          'unclosed_loops': [
1004              {
1005                  'name': a.conversation_name,
1006                  'created': a.created_at.isoformat(),
1007                  'weight': a.weight,
1008                  'topics': list(a.topics)[:5]
1009              }
1010              for a in report.unclosed_loops[:10]
1011          ],
1012          'principles': report.principles_in_artifacts[:10],
1013          'decisions': report.decisions_in_artifacts[:10],
1014          'topic_distribution': {k: len(v) for k, v in list(report.topic_to_artifacts.items())[:15]}
1015      })
1016  
1017  
@app.route('/artifacts/unclosed', methods=['GET'])
def artifacts_unclosed():
    """
    Get unclosed OODA loops - artifacts created but never revisited.

    These represent ideas that crystallized but weren't integrated.
    """
    lookback = float(request.args.get('hours', 336))
    report = analyze_artifacts(hours_back=lookback)

    loops = []
    for artifact in report.unclosed_loops:
        loops.append({
            'name': artifact.conversation_name,
            'created': artifact.created_at.isoformat(),
            'weight': artifact.weight,
            'type': artifact.artifact_type,
            'topics': list(artifact.topics)
        })

    return jsonify({'count': len(loops), 'unclosed_loops': loops})
1042  
1043  
1044  # ==================== Graph Integration ====================
1045  
@app.route('/graph/status')
def graph_status():
    """
    Get semantic graph status from Sovereign_Estate: atoms, edges,
    gravity wells, and altitude distribution.
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    return jsonify(graph_integration.get_state())
1058  
1059  
@app.route('/graph/atoms')
def graph_atoms():
    """
    Get recent atoms from the semantic graph.

    GET parameters:
        limit: Max atoms to return (default 50)
        source_type: Filter by 'user' or 'assistant'
        min_weight: Minimum weight threshold
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    args = request.args
    results = graph_integration.get_recent_atoms(
        limit=int(args.get('limit', 50)),
        source_type=args.get('source_type'),
        min_weight=float(args.get('min_weight', 0.0))
    )

    return jsonify({'count': len(results), 'atoms': results})
1087  
1088  
@app.route('/graph/atom/<uuid>')
def graph_atom(uuid):
    """Look up a single atom by its UUID; 404 if it doesn't exist."""
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    found = graph_integration.get_atom(uuid)
    if not found:
        return jsonify({'error': 'Atom not found'}), 404

    return jsonify(found)
1100  
1101  
@app.route('/graph/resonance')
def graph_resonance():
    """
    Get high-resonance atoms - the most significant content.

    GET parameters:
        limit: Max atoms to return (default 20)
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    max_atoms = int(request.args.get('limit', 20))
    top = graph_integration.get_high_resonance_atoms(limit=max_atoms)

    return jsonify({'count': len(top), 'high_resonance_atoms': top})
1120  
1121  
@app.route('/graph/gravity-wells')
def graph_gravity_wells():
    """
    Get gravity wells - highly connected nodes (10+ edges) that
    represent central topics in the graph.
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    found = graph_integration.get_gravity_wells()

    return jsonify({'count': len(found), 'gravity_wells': found})
1138  
1139  
@app.route('/graph/search')
def graph_search():
    """
    Search atoms by text content.

    GET parameters:
        q: Search query
        limit: Max results (default 20)
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    term = request.args.get('q', '')
    if not term:
        return jsonify({'error': 'No query provided'}), 400

    max_results = int(request.args.get('limit', 20))
    matches = graph_integration.search_atoms(term, limit=max_results)

    return jsonify({
        'query': term,
        'count': len(matches),
        'results': matches
    })
1164  
1165  
@app.route('/graph/stream')
def graph_stream():
    """
    Server-Sent Events stream of new graph events.

    Connect with EventSource to receive real-time updates:
        - atom: New atom added
        - edge: New edge formed
        - gravity_well: New gravity well detected

    Example:
        const es = new EventSource('/graph/stream');
        es.onmessage = (e) => console.log(JSON.parse(e.data));
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    import queue

    # Create a queue for this client; the subscriber callback below feeds
    # it and the SSE generator drains it.
    client_queue = queue.Queue()

    def on_event(event):
        client_queue.put(event)

    # Subscribe to all events
    # NOTE(review): nothing unsubscribes this callback when the client
    # disconnects, so each connection appears to leave a dead subscriber
    # and its queue behind - confirm whether GraphIntegration prunes them.
    graph_integration.subscribe('all', on_event)

    def generate():
        # Handshake event so clients know the stream is live.
        # NOTE(review): reads the private graph_integration._state - a
        # public accessor would be safer if one exists.
        yield f"data: {json.dumps({'type': 'connected', 'atom_count': graph_integration._state.atom_count})}\n\n"
        while True:
            try:
                event = client_queue.get(timeout=30)
                yield f"data: {json.dumps({'type': event.event_type, 'data': event.data, 'timestamp': event.timestamp.isoformat()})}\n\n"
            except queue.Empty:
                # Send heartbeat (keeps idle connections from being closed)
                yield f"data: {json.dumps({'type': 'heartbeat'})}\n\n"

    return Response(
        generate(),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'X-Accel-Buffering': 'no'  # disable nginx buffering so events flush
        }
    )
1213  
1214  
@app.route('/graph/reload', methods=['POST'])
def graph_reload():
    """
    Reload graph data from files.

    POST body (JSON):
        days: Days to load (default 1 for today only)
    """
    if not graph_integration:
        return jsonify({'error': 'Graph integration not initialized'}), 503

    payload = request.get_json() or {}
    days = int(payload.get('days', 1))

    # A single day means "today only"; anything else loads a range.
    if days == 1:
        state = graph_integration.load_today()
    else:
        state = graph_integration.load_range(days_back=days)

    return jsonify({
        'status': 'reloaded',
        'atoms': state.atom_count,
        'edges': state.edge_count,
        'gravity_wells': len(state.gravity_wells)
    })
1240  
1241  
1242  # ==================== Unified Correlation ====================
1243  
@app.route('/unified/report')
def unified_report():
    """
    Get unified correlation report across ALL sources: Claude sessions,
    Monologue transcripts, browser history, semantic graph, artifacts.

    GET parameters:
        hours: Hours to look back (default 24)
        format: 'json' or 'markdown' (default 'json')
    """
    if not unified_correlator:
        return jsonify({'error': 'Unified correlator not initialized'}), 503

    lookback = float(request.args.get('hours', 24))
    fmt = request.args.get('format', 'json')

    report = unified_correlator.get_unified_report(hours_back=lookback)

    if fmt == 'markdown':
        return report.to_markdown(), 200, {'Content-Type': 'text/markdown'}

    topics = {
        name: events[:5]
        for name, events in list(report.unified_topic_map.items())[:15]
    }
    return jsonify({
        'summary': report.to_dict(),
        'aha_moments': [e.to_dict() for e in report.aha_moments[:15]],
        'high_resonance': [e.to_dict() for e in report.high_resonance[:15]],
        'gravity_wells': report.gravity_wells[:10],
        'topics': topics
    })
1278  
1279  
@app.route('/unified/timeline')
def unified_timeline():
    """
    Get timeline of all events across all sources.

    GET parameters:
        hours: Hours to look back (default 24)
        types: Comma-separated event types to include (optional)
        limit: Max events (default 100)
    """
    if not unified_correlator:
        return jsonify({'error': 'Unified correlator not initialized'}), 503

    lookback = float(request.args.get('hours', 24))
    raw_types = request.args.get('types', '')
    max_events = int(request.args.get('limit', 100))

    # An empty/absent filter means "all event types".
    selected = raw_types.split(',') if raw_types else None

    timeline = unified_correlator.get_timeline(
        hours_back=lookback,
        event_types=selected
    )[:max_events]

    return jsonify({
        'count': len(timeline),
        'hours': lookback,
        'events': [e.to_dict() for e in timeline]
    })
1308  
1309  
@app.route('/unified/stream')
def unified_stream():
    """
    Server-Sent Events stream of unified events from ALL sources.

    Connect with EventSource to receive real-time updates from:
    - Graph (atoms, edges, gravity wells)
    - Correlations (aha moments)
    - High resonance content

    Example:
        const es = new EventSource('/unified/stream');
        es.onmessage = (e) => console.log(JSON.parse(e.data));
    """
    if not unified_correlator:
        return jsonify({'error': 'Unified correlator not initialized'}), 503

    import queue

    # Per-client queue; the subscriber callback feeds it, the SSE
    # generator below drains it.
    client_queue = queue.Queue()

    def on_event(event):
        client_queue.put(event)

    # NOTE(review): nothing unsubscribes this callback when the client
    # disconnects, so each connection appears to leave a dead subscriber
    # and its queue behind - confirm whether UnifiedCorrelator prunes them.
    unified_correlator.subscribe('all', on_event)

    def generate():
        # Handshake event so clients know the stream is live.
        yield f"data: {json.dumps({'type': 'connected', 'sources': ['graph', 'correlator', 'artifacts']})}\n\n"
        while True:
            try:
                event = client_queue.get(timeout=30)
                yield f"data: {json.dumps(event.to_dict())}\n\n"
            except queue.Empty:
                # Heartbeat every 30s keeps idle connections alive.
                yield f"data: {json.dumps({'type': 'heartbeat'})}\n\n"

    return Response(
        generate(),
        mimetype='text/event-stream',
        headers={
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'X-Accel-Buffering': 'no'  # disable nginx buffering so events flush
        }
    )
1354  
1355  
1356  # ==================== Voice Commands ====================
1357  
@app.route('/voice/command', methods=['POST'])
def voice_command():
    """
    Process a voice command and route it to the appropriate handler.

    POST body (JSON or plain text):
        command: "What was I working on today?"

    Returns intent, action to take, and response text for TTS.

    Example commands:
        - "What was I working on?"
        - "Capture this: [thought]"
        - "Run correlation"
        - "That's important"
        - "Brief me"
    """
    if not voice_router:
        return jsonify({'error': 'Voice router not initialized'}), 503

    # Accept both JSON bodies and raw text payloads.
    if request.is_json:
        command = request.get_json().get('command', '')
    else:
        command = request.data.decode('utf-8')

    if not command:
        return jsonify({'error': 'No command provided'}), 400

    result = voice_router.process(command)

    # Optional execute=true flag: would internally invoke the resolved
    # endpoint so voice commands execute directly. Not implemented yet
    # (would use the Flask test client).
    execute = request.args.get('execute', 'false').lower() == 'true'
    if execute and result.action.startswith('/'):
        pass

    return jsonify(result.to_dict())
1398  
1399  
@app.route('/voice/help')
def voice_help():
    """List the recognized voice command patterns and their actions."""
    patterns = [
        ('What was I working on?', 'context_query'),
        ('Capture this: [content]', 'capture'),
        ("That's important", 'mark_important'),
        ('Run correlation', 'run_correlation'),
        ('Show me [view]', 'show_dashboard'),
        ('Brief me', 'read_brief'),
        ('Search for [topic]', 'topic_search'),
        ('Sync now', 'run_sync'),
        ('Scratch that', 'mark_ignore'),
    ]
    return jsonify({
        'commands': [{'pattern': p, 'action': a} for p, a in patterns]
    })
1416  
1417  
1418  # ==================== Visual Capture ====================
1419  
@app.route('/visual/ingest', methods=['POST'])
def visual_ingest():
    """
    Ingest an image for processing.

    POST body: multipart/form-data or raw image bytes
        image: Image file or base64 encoded
        source: 'iphone', 'meta_glasses', 'ipad', 'screenshot'
        job_id: Associated work session (optional)
        tags: Comma-separated tags (optional)
        timestamp: ISO timestamp (optional, defaults to now)
        gps_lat, gps_lon: GPS coordinates (optional)

    Returns processed asset with OCR text and metadata.
    """
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    # Get image data
    if request.content_type and 'multipart' in request.content_type:
        file = request.files.get('image')
        if not file:
            return jsonify({'error': 'No image file provided'}), 400
        image_data = file.read()
    else:
        # Raw bytes or base64
        image_data = request.data
        if not image_data:
            return jsonify({'error': 'No image data provided'}), 400

        # Best-effort base64 sniffing. Raw PNG/JPEG/PDF payloads are
        # passed through untouched.
        import base64
        try:
            if image_data[:20].decode('utf-8', errors='ignore').startswith('data:image'):
                # Data URL format: strip the "data:image/...;base64," header
                image_data = base64.b64decode(image_data.split(b',')[1])
            elif not image_data.startswith((b'\x89PNG', b'\xff\xd8\xff', b'%PDF')):
                # BUG FIX: the old check compared the first FOUR bytes
                # against the THREE-byte JPEG magic (b'\xff\xd8\xff'), so
                # JPEGs were never recognized and were run through
                # base64.b64decode, which silently drops non-alphabet
                # bytes and corrupts the payload. startswith() with a
                # tuple handles prefixes of differing lengths correctly.
                image_data = base64.b64decode(image_data)
        except (ValueError, IndexError):
            # binascii.Error is a ValueError subclass; on failure keep
            # the raw bytes as-is (best effort, matches prior behavior).
            pass

    # Get metadata (form fields take precedence, query string as fallback)
    source = request.form.get('source') or request.args.get('source', 'iphone')
    job_id = request.form.get('job_id') or request.args.get('job_id')
    tags_str = request.form.get('tags') or request.args.get('tags', '')
    tags = [t.strip() for t in tags_str.split(',') if t.strip()]

    gps = None
    lat = request.form.get('gps_lat') or request.args.get('gps_lat')
    lon = request.form.get('gps_lon') or request.args.get('gps_lon')
    if lat and lon:
        gps = (float(lat), float(lon))

    timestamp = None
    ts_str = request.form.get('timestamp') or request.args.get('timestamp')
    if ts_str:
        timestamp = datetime.fromisoformat(ts_str)

    # Process
    asset = visual_capture.ingest_image(
        image_data=image_data,
        source=source,
        job_id=job_id,
        tags=tags,
        gps=gps,
        timestamp=timestamp
    )

    return jsonify({
        'status': 'ingested',
        'asset': asset.to_dict()
    })
1493  
1494  
@app.route('/visual/recent')
def visual_recent():
    """Return visual assets captured within the last N hours."""
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    args = request.args
    recent = visual_capture.get_recent_assets(
        hours_back=float(args.get('hours', 24)),
        job_id=args.get('job_id'),
        limit=int(args.get('limit', 50))
    )

    return jsonify({
        'count': len(recent),
        'assets': [a.to_dict() for a in recent]
    })
1515  
1516  
@app.route('/visual/search')
def visual_search():
    """Search captured visual assets by their extracted OCR text."""
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    term = request.args.get('q', '')
    if not term:
        return jsonify({'error': 'No query provided'}), 400

    max_results = int(request.args.get('limit', 20))
    hits = visual_capture.search_ocr(term, limit=max_results)

    return jsonify({
        'query': term,
        'count': len(hits),
        'assets': [a.to_dict() for a in hits]
    })
1535  
1536  
1537  # ==================== Work Sessions (Technician Mode) ====================
1538  
@app.route('/work/start', methods=['POST'])
def work_start():
    """
    Start a new work session (technician job).

    POST body (JSON):
        property_name: Name of property
        technician: Technician name (optional)

    Returns session ID for subsequent captures.
    """
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    payload = request.get_json() or {}
    session = visual_capture.start_work_session(
        payload.get('property_name', 'Unknown Property'),
        payload.get('technician', '')
    )

    return jsonify({
        'status': 'started',
        'session_id': session.id,
        'property': session.property_name,
        'started_at': session.started_at.isoformat()
    })
1565  
1566  
@app.route('/work/end/<session_id>', methods=['POST'])
def work_end(session_id):
    """
    End a work session and generate work notes.

    Returns auto-generated work notes markdown; 404 for unknown sessions.
    """
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    ended = visual_capture.end_work_session(session_id)
    if not ended:
        return jsonify({'error': 'Session not found'}), 404

    return jsonify({
        'status': 'ended',
        'session_id': ended.id,
        'duration_minutes': ended.duration_minutes,
        'work_notes': ended.work_notes
    })
1587  
1588  
@app.route('/work/note/<session_id>', methods=['POST'])
def work_note(session_id):
    """
    Add a note to a work session.

    POST body (JSON):
        content: The note content
        type: 'narration', 'issue', 'resolution', 'part'
    """
    if not visual_capture:
        return jsonify({'error': 'Visual capture not initialized'}), 503

    payload = request.get_json() or {}
    text = payload.get('content', '')
    kind = payload.get('type', 'narration')

    if not text:
        return jsonify({'error': 'No content provided'}), 400

    visual_capture.add_to_session(session_id, text, kind)
    return jsonify({'status': 'added', 'type': kind})
1611  
1612  
1613  # ==================== Video Processing ====================
1614  
@app.route('/video/process', methods=['POST'])
def video_process():
    """
    Process a video file.

    POST body: multipart/form-data
        video: Video file
        job_id: Associated work session (optional)
        property_name: Property name (optional)
        extract_keyframes: true/false (default true)
        transcribe: true/false (default true)

    Currently processes synchronously and returns the full result
    (would hand off to a background worker and return a video_id in
    production).
    """
    if not video_processor:
        return jsonify({'error': 'Video processor not initialized'}), 503

    file = request.files.get('video')
    if not file:
        return jsonify({'error': 'No video file provided'}), 400

    # Save to a private temp directory. Strip any directory components
    # from the client-supplied filename so it cannot escape the temp dir
    # (file.filename is attacker-controlled).
    import shutil
    import tempfile
    temp_dir = Path(tempfile.mkdtemp())
    temp_path = temp_dir / Path(file.filename).name
    file.save(str(temp_path))

    job_id = request.form.get('job_id')
    property_name = request.form.get('property_name')
    extract_kf = request.form.get('extract_keyframes', 'true').lower() == 'true'
    transcribe = request.form.get('transcribe', 'true').lower() == 'true'

    # For now, process synchronously (would be async in production)
    try:
        result = video_processor.process_video(
            str(temp_path),
            job_id=job_id,
            property_name=property_name,
            extract_keyframes=extract_kf,
            transcribe=transcribe
        )

        return jsonify({
            'status': 'processed',
            'result': result.to_dict(),
            'transcript': result.full_transcript[:2000],
            'keyframes': [kf.to_dict() for kf in result.significant_keyframes[:10]]
        })

    except Exception as e:
        return jsonify({'error': str(e)}), 500

    finally:
        # BUG FIX: the old code unlinked only the saved file, leaking one
        # mkdtemp() directory per request. Remove the whole tree.
        shutil.rmtree(temp_dir, ignore_errors=True)
1672  
1673  
@app.route('/video/<video_id>')
def video_info(video_id):
    """Return the summary for a previously processed video, or 404."""
    if not video_processor:
        return jsonify({'error': 'Video processor not initialized'}), 503

    info = video_processor.get_video_summary(video_id)
    if info:
        return jsonify(info)
    return jsonify({'error': 'Video not found'}), 404
1685  
1686  
1687  # ==================== Mobile / iOS Shortcuts ====================
1688  
@app.route('/mobile/status')
def mobile_status():
    """
    Quick status optimized for mobile displays.

    Returns a compact summary suitable for iOS Shortcuts or widgets.
    """
    parts = []

    # Pipeline status and recent aha count (both come from the pipeline)
    if ingestion_pipeline:
        pstate = ingestion_pipeline.get_state()
        parts.append(
            f"Pipeline: {pstate['events_collected']} collected, "
            f"{pstate['correlations_found']} correlations"
        )
        recent = ingestion_pipeline.get_aha_moments(limit=100)
        parts.append(f"Aha moments: {len(recent)}")

    # Daemon / context-stream status
    if daemon:
        cs_state = daemon.context_stream.get_state()
        parts.append(
            f"Context: {cs_state.item_count} items, "
            f"{cs_state.pressure:.0%} pressure"
        )

    return jsonify({
        'status': 'ok',
        'summary': ' | '.join(parts),
        'timestamp': datetime.now().isoformat(),
    })
1718  
1719  
@app.route('/mobile/brief')
def mobile_brief():
    """
    Today's brief - insights and suggested next steps.

    Returns plain text suitable for reading on mobile.
    """
    out = ["# Today's Brief", ""]

    # Recent aha moments (top 3 of the last 5)
    if ingestion_pipeline:
        moments = ingestion_pipeline.get_aha_moments(limit=5)
        if moments:
            out.append("## Recent Insights")
            for moment in moments[:3]:
                raw_topics = moment.get('topics')
                label = ', '.join(raw_topics[:3]) if raw_topics else 'general'
                out.append(f"- [{label}] confidence {moment.get('confidence', 0):.0%}")
            out.append("")

    # High-weight items flagged as needing human validation
    if daemon:
        pending = daemon.context_stream.get_items_needing_validation()
        if pending:
            out.append("## Needs Your Input")
            for entry in pending[:3]:
                out.append(f"- {entry.content[:80]}...")
            out.append("")

    # Static suggested next steps
    out += [
        "## Suggested Actions",
        "- Review validation-seeking moments",
        "- Check high-confidence correlations",
        "- Run backfill if data is stale",
    ]

    return '\n'.join(out), 200, {'Content-Type': 'text/plain'}
1755  
1756  
@app.route('/mobile/aha')
def mobile_aha():
    """
    Recent aha moments in clean, readable format.

    Query params:
        limit: Maximum number of moments to list (default 10).

    Returns plain text list of recent insights.
    """
    if not ingestion_pipeline:
        return "Pipeline not initialized", 503

    # Validate 'limit' explicitly — previously a non-numeric value made
    # int() raise and surfaced as a 500 instead of a client error.
    try:
        limit = int(request.args.get('limit', 10))
    except (TypeError, ValueError):
        return "Invalid 'limit' parameter", 400
    limit = max(0, limit)

    moments = ingestion_pipeline.get_aha_moments(limit=limit)

    lines = [f"# {len(moments)} Recent Aha Moments", ""]

    for i, m in enumerate(moments, 1):
        ts = m.get('timestamp', '')[:10]  # Just date
        topics = ', '.join(m.get('topics', [])[:4]) if m.get('topics') else ''
        conf = m.get('confidence', 0)
        browser = m.get('browser_context', '')

        lines.append(f"## {i}. {ts}")
        if topics:
            lines.append(f"Topics: {topics}")
        lines.append(f"Confidence: {conf:.0%}")
        if browser:
            lines.append(f"Researching: {browser[:60]}")

        # Content preview: first Claude message, flattened to one line
        claude = m.get('claude_content', [])
        if claude and claude[0]:
            preview = claude[0][:150].replace('\n', ' ')
            lines.append(f"> {preview}...")

        lines.append("")

    return '\n'.join(lines), 200, {'Content-Type': 'text/plain'}
1794  
1795  
@app.route('/mobile/capture', methods=['POST'])
def mobile_capture():
    """
    Capture a quick thought from mobile.

    Body (JSON or plain text):
        thought: The thought to capture
        tags: Optional list of tags

    Injects directly into context stream with high weight.
    Returns 503 if the daemon is down, 400 when no thought is supplied.
    """
    if not daemon:
        return jsonify({'error': 'Daemon not initialized'}), 503

    # Accept both JSON and plain text
    if request.is_json:
        # silent=True: unparseable JSON falls through to the 400 below
        # instead of raising inside Flask.
        data = request.get_json(silent=True) or {}
        thought = data.get('thought', '')
        tags = data.get('tags', [])
        # Guard against a scalar 'tags' value — set("abc") would explode a
        # string into single characters when unioned below.
        if isinstance(tags, str):
            tags = [tags]
        elif not isinstance(tags, (list, tuple, set)):
            tags = []
    else:
        # Tolerate malformed UTF-8 rather than raising into a 500.
        thought = request.data.decode('utf-8', errors='replace')
        tags = []

    if not thought:
        return jsonify({'error': 'No thought provided'}), 400

    # Analyze signals
    signals = detect_signals(thought)

    # Import here to avoid circular imports
    from core.attention.context_stream import StreamItem, Priority

    # Create high-priority item
    item = StreamItem(
        id=f"mobile-{datetime.now().strftime('%Y%m%d%H%M%S')}",
        content=thought,
        source="mobile_capture",
        priority=Priority.HIGH,
        signal_tags=signals.suggested_tags.union(set(tags)),
        signal_weight=signals.weight_modifier * 1.5,  # Boost for intentional capture
        needs_validation='?' in thought  # If it's a question, flag for validation
    )

    daemon.context_stream.add_item(item)

    return jsonify({
        'status': 'captured',
        'id': item.id,
        'weight': item.signal_weight,
        'tags': list(item.signal_tags)
    })
1847  
1848  
1849  # ==================== Overcast Webhook ====================
1850  
@app.route('/overcast', methods=['POST'])
def receive_overcast():
    """Receive OPML from iOS Shortcut.

    Saves the payload twice (timestamped archive + 'latest.opml') and
    returns basic stats. 400 if the body does not look like OPML.
    """
    # errors='replace': a malformed byte in the upload should not turn
    # into a UnicodeDecodeError / 500.
    opml_content = request.data.decode('utf-8', errors='replace')

    if not opml_content or '<opml' not in opml_content:
        return jsonify({
            'status': 'error',
            'message': 'Invalid OPML content'
        }), 400

    # Ensure directory exists
    OVERCAST_DIR.mkdir(parents=True, exist_ok=True)

    # Save with timestamp (archive copy)
    timestamp = datetime.now().strftime('%Y-%m-%d_%H%M%S')
    timestamped_path = OVERCAST_DIR / f'overcast_{timestamp}.opml'
    # Pin the encoding — write_text() without one uses the locale encoding,
    # which can fail on non-ASCII podcast titles on some systems.
    timestamped_path.write_text(opml_content, encoding='utf-8')

    # Also save as latest
    latest_path = OVERCAST_DIR / 'latest.opml'
    latest_path.write_text(opml_content, encoding='utf-8')

    # Parse stats
    stats = parse_opml_stats(opml_content)

    print(f"[Overcast] Received: {len(opml_content):,} bytes, {stats.get('podcasts', '?')} podcasts")

    return jsonify({
        'status': 'ok',
        'bytes': len(opml_content),
        'timestamp': timestamp,
        'stats': stats
    })
1885  
1886  
@app.route('/overcast/latest')
def overcast_latest():
    """Get info about latest Overcast sync."""
    latest_path = OVERCAST_DIR / 'latest.opml'

    if not latest_path.exists():
        return jsonify({
            'status': 'no_sync',
            'message': 'No OPML received yet'
        })

    # Pin the encoding — read_text() without one uses the locale encoding
    # and can mis-decode the UTF-8 file written by the webhook.
    content = latest_path.read_text(encoding='utf-8')
    stats = parse_opml_stats(content)
    mtime = datetime.fromtimestamp(latest_path.stat().st_mtime)

    return jsonify({
        'status': 'ok',
        'last_sync': mtime.isoformat(),
        'bytes': len(content),
        'stats': stats
    })
1908  
1909  
def parse_opml_stats(opml_content: str) -> dict:
    """Extract podcast/episode counts from an OPML document.

    Returns {'podcasts', 'episodes', 'played'} on success, or
    {'error': <message>} if the document cannot be parsed.
    """
    try:
        root = ET.fromstring(opml_content)
        body_el = root.find('body')

        counts = {'podcasts': 0, 'episodes': 0, 'played': 0}
        # iter() walks nested outlines too, matching Overcast's layout
        for node in body_el.iter('outline'):
            kind = node.get('type')
            if kind == 'rss':
                counts['podcasts'] += 1
            elif kind == 'podcast-episode':
                counts['episodes'] += 1
                counts['played'] += int(node.get('played') == '1')
    except Exception as exc:
        return {'error': str(exc)}
    return counts
1928  
1929  
1930  # ==================== Startup ====================
1931  
1932  # Initialize daemon on module load (for gunicorn)
1933  init_daemon()
1934  
1935  
if __name__ == '__main__':
    # Startup banner — printed only when run directly; gunicorn imports the
    # module and never enters this branch.
    print(f"""
╔══════════════════════════════════════════════════════════════╗
║                  Sovereign OS Server                          ║
╠══════════════════════════════════════════════════════════════╣
║  Port: {PORT}                                                   ║
║  Sessions: {SESSIONS_DIR}
║  Daily: {DAILY_DIR}
║                                                              ║
║  Endpoints:                                                  ║
║    /                      - Dashboard                        ║
║    /health                - Health check                     ║
║    /attention/status      - Attention state                  ║
║    /attention/sessions    - Active sessions                  ║
║    /attention/attractors  - Cross-session topics             ║
║    /attention/coherence   - Coherence risk                   ║
║    /attention/stream      - Context stream                   ║
║    /overcast              - POST OPML webhook                ║
║                                                              ║
║  "Attention is all you need"                                 ║
╚══════════════════════════════════════════════════════════════╝
""")
    # Bind on all interfaces; threaded=True lets slow handlers (e.g.
    # /video/process) run without blocking other requests.
    app.run(host='0.0.0.0', port=PORT, debug=False, threaded=True)