# run_first_officer_local.py
   1  #!/usr/bin/env python3
   2  """
   3  Sovereign OS - Local First Officer Daemon
   4  ==========================================
   5  
   6  Lightweight daemon that runs on your workstation (Mac/Linux).
   7  Watches Claude Code transcripts in real-time, detects insights,
   8  and pushes them to the central nodebox server.
   9  
  10  Usage:
  11      python3 run_first_officer_local.py
  12      python3 run_first_officer_local.py --nodebox 100.79.197.96
  13  
  14  The daemon:
  15  1. Watches ~/.claude/projects/ for active transcripts
  16  2. Detects significant events (new principles, axiom fixes, protocol changes)
  17  3. Pushes insights to nodebox Hypercore daemon
  18  4. Updates local LIVE-COMPRESSION.md in real-time
  19  """
  20  
  21  import os
  22  import sys
  23  import json
  24  import time
  25  import re
  26  import hashlib
  27  import argparse
  28  import signal
  29  import socket
  30  from pathlib import Path
  31  from datetime import datetime
  32  from typing import Dict, List, Optional, Any
  33  from dataclasses import dataclass, field
  34  import urllib.request
  35  import urllib.error
  36  
  37  # Optional: watchdog for efficient file monitoring
  38  try:
  39      from watchdog.observers import Observer
  40      from watchdog.events import FileSystemEventHandler, FileModifiedEvent
  41      WATCHDOG_AVAILABLE = True
  42  except ImportError:
  43      WATCHDOG_AVAILABLE = False
  44      print("Note: Install watchdog for more efficient monitoring: pip install watchdog")
  45  
  46  
# ═══════════════════════════════════════════════════════════════════════════════
# MESH NETWORK INTEGRATION
# ═══════════════════════════════════════════════════════════════════════════════

# TCP port of the local sovereign-mesh daemon. LocalFOConfig.mesh_port
# defaults to the same value; keep the two in sync.
MESH_PORT = 7778
  52  
  53  def publish_to_mesh(message_type: str, payload: Dict[str, Any]) -> bool:
  54      """
  55      Publish insight to sovereign mesh for cross-instance consciousness.
  56  
  57      All Claude instances receive this. Meta FO aggregates.
  58      """
  59      try:
  60          msg = json.dumps({
  61              "type": message_type,
  62              "from": "first-officer-local",
  63              "payload": payload,
  64              "timestamp": datetime.now().isoformat()
  65          }).encode()
  66  
  67          req = urllib.request.Request(
  68              f"http://localhost:{MESH_PORT}/publish",
  69              data=msg,
  70              headers={"Content-Type": "application/json"},
  71              method="POST"
  72          )
  73          with urllib.request.urlopen(req, timeout=2) as resp:
  74              return resp.status == 200
  75      except (urllib.error.URLError, Exception):
  76          return False  # Mesh not running - local-only mode
  77  
  78  
  79  def publish_fo_state_to_mesh(state: Dict[str, Any]) -> bool:
  80      """
  81      Publish First Officer state to mesh for silence-led context buffer.
  82  
  83      This enables cross-instance awareness - other Claude sessions
  84      can bootstrap with context from all running First Officers.
  85      """
  86      import socket
  87      try:
  88          # Add node identification
  89          state_with_id = {
  90              "nodeId": socket.gethostname(),
  91              "timestamp": datetime.now().isoformat(),
  92              **state
  93          }
  94  
  95          data = json.dumps(state_with_id).encode()
  96          req = urllib.request.Request(
  97              f"http://localhost:{MESH_PORT}/fo-state",
  98              data=data,
  99              headers={"Content-Type": "application/json"},
 100              method="POST"
 101          )
 102          with urllib.request.urlopen(req, timeout=2) as resp:
 103              return resp.status == 200
 104      except (urllib.error.URLError, Exception):
 105          return False  # Mesh not running - local-only mode
 106  
 107  
 108  def check_mesh_available() -> bool:
 109      """Check if mesh daemon is reachable."""
 110      try:
 111          with urllib.request.urlopen(f"http://localhost:{MESH_PORT}/health", timeout=2) as resp:
 112              return resp.status == 200
 113      except:
 114          return False
 115  
 116  
 117  def publish_aha_moment(payload: Dict[str, Any]) -> bool:
 118      """
 119      Publish high-importance insight as Aha Moment to mesh.
 120  
 121      These get priority placement in all Claude instance bootstraps.
 122      Uses dedicated /aha-moment endpoint for proper storage.
 123      """
 124      try:
 125          data = json.dumps(payload).encode()
 126          req = urllib.request.Request(
 127              f"http://localhost:{MESH_PORT}/aha-moment",
 128              data=data,
 129              headers={"Content-Type": "application/json"},
 130              method="POST"
 131          )
 132          with urllib.request.urlopen(req, timeout=2) as resp:
 133              return resp.status == 200
 134      except (urllib.error.URLError, Exception):
 135          return False  # Mesh not running - local-only mode
 136  
 137  
 138  def publish_principle_candidate(payload: Dict[str, Any]) -> bool:
 139      """
 140      Publish high-divergence insight as Principle Candidate to mesh.
 141  
 142      When D-score > 0.5, an insight might represent a new principle
 143      that should be tested across all instances.
 144      """
 145      try:
 146          data = json.dumps(payload).encode()
 147          req = urllib.request.Request(
 148              f"http://localhost:{MESH_PORT}/principle-candidate",
 149              data=data,
 150              headers={"Content-Type": "application/json"},
 151              method="POST"
 152          )
 153          with urllib.request.urlopen(req, timeout=2) as resp:
 154              return resp.status == 200
 155      except (urllib.error.URLError, Exception):
 156          return False  # Mesh not running - local-only mode
 157  
 158  
 159  def fetch_principle_candidates() -> List[Dict[str, Any]]:
 160      """
 161      Fetch principle candidates from mesh for evaluation.
 162  
 163      Returns list of principle candidates that need voting.
 164      """
 165      try:
 166          with urllib.request.urlopen(f"http://localhost:{MESH_PORT}/context", timeout=2) as resp:
 167              ctx = json.loads(resp.read())
 168              return ctx.get("principleCandidates", [])
 169      except (urllib.error.URLError, Exception):
 170          return []
 171  
 172  
 173  def vote_on_principle(name: str, vote: str, reason: str) -> bool:
 174      """
 175      Submit a vote on a principle candidate.
 176  
 177      Args:
 178          name: Principle candidate name
 179          vote: 'support' or 'reject'
 180          reason: Why this vote was cast
 181      """
 182      try:
 183          data = json.dumps({
 184              "name": name,
 185              "vote": vote,
 186              "from": socket.gethostname(),
 187              "reason": reason
 188          }).encode()
 189          req = urllib.request.Request(
 190              f"http://localhost:{MESH_PORT}/principle-vote",
 191              data=data,
 192              headers={"Content-Type": "application/json"},
 193              method="POST"
 194          )
 195          with urllib.request.urlopen(req, timeout=2) as resp:
 196              return resp.status == 200
 197      except (urllib.error.URLError, Exception):
 198          return False
 199  
 200  
 201  def fetch_confirmed_principles() -> List[Dict[str, Any]]:
 202      """
 203      Fetch confirmed principles ready to be written to CLAUDE.md.
 204      """
 205      try:
 206          with urllib.request.urlopen(f"http://localhost:{MESH_PORT}/confirmed-principles", timeout=2) as resp:
 207              data = json.loads(resp.read())
 208              return data.get("confirmed", [])
 209      except (urllib.error.URLError, Exception):
 210          return []
 211  
 212  
 213  def mark_principle_written(name: str) -> bool:
 214      """
 215      Mark a principle as written to CLAUDE.md.
 216      """
 217      try:
 218          data = json.dumps({"name": name}).encode()
 219          req = urllib.request.Request(
 220              f"http://localhost:{MESH_PORT}/mark-principle-written",
 221              data=data,
 222              headers={"Content-Type": "application/json"},
 223              method="POST"
 224          )
 225          with urllib.request.urlopen(req, timeout=2) as resp:
 226              return resp.status == 200
 227      except (urllib.error.URLError, Exception):
 228          return False
 229  
 230  
 231  # ═══════════════════════════════════════════════════════════════════════════════
 232  # CONFIGURATION
 233  # ═══════════════════════════════════════════════════════════════════════════════
 234  
 235  @dataclass
 236  class LocalFOConfig:
 237      """Configuration for local First Officer daemon."""
 238      # Paths
 239      claude_projects_dir: Path = field(default_factory=lambda: Path.home() / ".claude" / "projects")
 240      sovereign_os_dir: Path = field(default_factory=lambda: Path.home() / "repos" / "Sovereign_OS")
 241  
 242      # Nodebox connection
 243      nodebox_ip: str = "100.79.197.96"
 244      nodebox_port: int = 7777
 245  
 246      # Mesh network (local sovereign-mesh daemon)
 247      mesh_port: int = 7778
 248  
 249      # Detection settings
 250      poll_interval: float = 2.0  # seconds between transcript checks
 251      insight_cooldown: float = 30.0  # don't re-detect same insight within this window
 252  
 253      @property
 254      def mesh_url(self) -> str:
 255          return f"http://localhost:{self.mesh_port}"
 256  
 257      @property
 258      def live_compression_path(self) -> Path:
 259          return self.sovereign_os_dir / "sessions" / "LIVE-COMPRESSION.md"
 260  
 261      @property
 262      def fo_state_path(self) -> Path:
 263          return self.sovereign_os_dir / "sessions" / "FO-STATE.json"
 264  
 265      @property
 266      def nodebox_url(self) -> str:
 267          return f"http://{self.nodebox_ip}:{self.nodebox_port}"
 268  
 269      @property
 270      def stats_cache_path(self) -> Path:
 271          return Path.home() / ".claude" / "stats-cache.json"
 272  
 273      @property
 274      def economics_config_path(self) -> Path:
 275          return Path.home() / ".sovereign" / "economics.yaml"
 276  
 277  
 278  # ═══════════════════════════════════════════════════════════════════════════════
 279  # COST TRACKING (Bitcoin-anchored)
 280  # ═══════════════════════════════════════════════════════════════════════════════
 281  
 282  @dataclass
 283  class CostSnapshot:
 284      """Token usage and sat cost at a point in time."""
 285      timestamp: str
 286      input_tokens: int
 287      output_tokens: int
 288      cache_read_tokens: int
 289      cache_creation_tokens: int
 290      estimated_sats: int
 291      model: str = "claude-opus-4-5-20251101"
 292  
 293      def to_dict(self) -> dict:
 294          return {
 295              "timestamp": self.timestamp,
 296              "input_tokens": self.input_tokens,
 297              "output_tokens": self.output_tokens,
 298              "cache_read_tokens": self.cache_read_tokens,
 299              "cache_creation_tokens": self.cache_creation_tokens,
 300              "estimated_sats": self.estimated_sats,
 301              "model": self.model
 302          }
 303  
 304  
 305  class CostTracker:
 306      """Tracks Claude token usage with three cost perspectives:
 307  
 308      1. Subscription cost: Fixed $200/month = 200k sats (what you pay)
 309      2. API cost: Anthropic's published API rates (market rate)
 310      3. Infrastructure cost: Estimated true cost to serve (pre-subsidy)
 311  
 312      The infrastructure cost reflects Anthropic's actual compute costs before
 313      CapEx subsidies and market-share pricing. Industry estimates suggest
 314      true inference costs are 2-3x API pricing.
 315      """
 316  
 317      # API rates (Anthropic published pricing - subsidized for market share)
 318      API_RATES = {
 319          "input_sats_per_mtok": 3000,      # ~$3/MTok input
 320          "output_sats_per_mtok": 15000,    # ~$15/MTok output
 321          "cache_read_sats_per_mtok": 300,  # ~$0.30/MTok cache read
 322          "cache_create_sats_per_mtok": 3750,  # ~$3.75/MTok cache creation
 323      }
 324  
 325      # Infrastructure multiplier (API rates × this = estimated true cost)
 326      # Anthropic operates at a loss for market share; true costs are higher
 327      # Load from config or use default
 328      INFRA_MULTIPLIER = 2.5
 329  
 330      @classmethod
 331      def get_infra_rates(cls) -> dict:
 332          """Get infrastructure rates based on current multiplier."""
 333          return {
 334              "input_sats_per_mtok": int(cls.API_RATES["input_sats_per_mtok"] * cls.INFRA_MULTIPLIER),
 335              "output_sats_per_mtok": int(cls.API_RATES["output_sats_per_mtok"] * cls.INFRA_MULTIPLIER),
 336              "cache_read_sats_per_mtok": int(cls.API_RATES["cache_read_sats_per_mtok"] * cls.INFRA_MULTIPLIER),
 337              "cache_create_sats_per_mtok": int(cls.API_RATES["cache_create_sats_per_mtok"] * cls.INFRA_MULTIPLIER),
 338          }
 339  
 340      # Subscription defaults
 341      MONTHLY_SATS = 200000  # $200/month at $100k BTC
 342      TARGET_TOKENS = 5000000  # 5M tokens = good monthly usage
 343      BTC_PRICE_USD = 100000  # Update when BTC price changes significantly
 344  
 345      @classmethod
 346      def sats_to_usd(cls, sats: int) -> float:
 347          """Convert sats to USD at current BTC price."""
 348          return round(sats / 100_000_000 * cls.BTC_PRICE_USD, 2)
 349  
 350      @classmethod
 351      def format_sats_usd(cls, sats: int) -> str:
 352          """Format sats with USD equivalent: '200,000 sats ($200.00)'"""
 353          usd = cls.sats_to_usd(sats)
 354          return f"{sats:,} sats (${usd:,.2f})"
 355  
 356      def __init__(self, config: LocalFOConfig):
 357          self.config = config
 358          self.monthly_sats = self.MONTHLY_SATS
 359          self.target_tokens = self.TARGET_TOKENS
 360          self._load_economics_config()
 361          self.last_snapshot: Optional[CostSnapshot] = None
 362          self.session_start_snapshot: Optional[CostSnapshot] = None
 363  
 364      def _load_economics_config(self):
 365          """Load operator economics config if available."""
 366          try:
 367              import yaml
 368              if self.config.economics_config_path.exists():
 369                  with open(self.config.economics_config_path) as f:
 370                      econ = yaml.safe_load(f)
 371                      if econ:
 372                          # Load BTC price from operator config
 373                          if "operator" in econ and "btc_price_usd" in econ["operator"]:
 374                              CostTracker.BTC_PRICE_USD = econ["operator"]["btc_price_usd"]
 375                          # Load Claude config
 376                          if "claude" in econ:
 377                              claude = econ["claude"]
 378                              if "monthly_sats" in claude:
 379                                  self.monthly_sats = claude["monthly_sats"]
 380                              if "efficiency" in claude and "target_tokens_monthly" in claude["efficiency"]:
 381                                  self.target_tokens = claude["efficiency"]["target_tokens_monthly"]
 382                              # Load API rates if specified
 383                              if "api_equivalent" in claude:
 384                                  api = claude["api_equivalent"]
 385                                  if "input_sats_per_mtok" in api:
 386                                      CostTracker.API_RATES["input_sats_per_mtok"] = api["input_sats_per_mtok"]
 387                                  if "output_sats_per_mtok" in api:
 388                                      CostTracker.API_RATES["output_sats_per_mtok"] = api["output_sats_per_mtok"]
 389                                  if "cache_read_sats_per_mtok" in api:
 390                                      CostTracker.API_RATES["cache_read_sats_per_mtok"] = api["cache_read_sats_per_mtok"]
 391                              # Load infrastructure multiplier
 392                              if "infrastructure" in claude and "multiplier" in claude["infrastructure"]:
 393                                  CostTracker.INFRA_MULTIPLIER = claude["infrastructure"]["multiplier"]
 394          except ImportError:
 395              pass  # yaml not installed, use defaults
 396          except Exception as e:
 397              print(f"  Warning: Could not load economics config: {e}")
 398  
 399      def read_stats_cache(self) -> Optional[dict]:
 400          """Read Claude Code stats cache."""
 401          if not self.config.stats_cache_path.exists():
 402              return None
 403          try:
 404              with open(self.config.stats_cache_path) as f:
 405                  return json.load(f)
 406          except Exception as e:
 407              print(f"  Warning: Could not read stats cache: {e}")
 408              return None
 409  
 410      def get_model_usage(self, model: str = "claude-opus-4-5-20251101") -> Optional[dict]:
 411          """Get usage stats for a specific model."""
 412          stats = self.read_stats_cache()
 413          if not stats or "modelUsage" not in stats:
 414              return None
 415          return stats.get("modelUsage", {}).get(model)
 416  
 417      def calculate_api_equivalent_sats(self, usage: dict) -> int:
 418          """Calculate what this usage would cost on API (Anthropic published rates)."""
 419          input_sats = (usage.get("inputTokens", 0) / 1_000_000) * self.API_RATES["input_sats_per_mtok"]
 420          output_sats = (usage.get("outputTokens", 0) / 1_000_000) * self.API_RATES["output_sats_per_mtok"]
 421          cache_read_sats = (usage.get("cacheReadInputTokens", 0) / 1_000_000) * self.API_RATES["cache_read_sats_per_mtok"]
 422          cache_create_sats = (usage.get("cacheCreationInputTokens", 0) / 1_000_000) * self.API_RATES["cache_create_sats_per_mtok"]
 423          return int(input_sats + output_sats + cache_read_sats + cache_create_sats)
 424  
 425      def calculate_infra_cost_sats(self, usage: dict) -> int:
 426          """Calculate estimated true infrastructure cost (pre-subsidy, pre-margin)."""
 427          infra_rates = self.get_infra_rates()
 428          input_sats = (usage.get("inputTokens", 0) / 1_000_000) * infra_rates["input_sats_per_mtok"]
 429          output_sats = (usage.get("outputTokens", 0) / 1_000_000) * infra_rates["output_sats_per_mtok"]
 430          cache_read_sats = (usage.get("cacheReadInputTokens", 0) / 1_000_000) * infra_rates["cache_read_sats_per_mtok"]
 431          cache_create_sats = (usage.get("cacheCreationInputTokens", 0) / 1_000_000) * infra_rates["cache_create_sats_per_mtok"]
 432          return int(input_sats + output_sats + cache_read_sats + cache_create_sats)
 433  
 434      def calculate_all_costs(self, usage: dict) -> dict:
 435          """Calculate all three cost perspectives.
 436  
 437          Returns:
 438              dict with:
 439              - subscription_sats: Fixed monthly cost (what you actually pay)
 440              - api_sats: Anthropic published API rates (market rate)
 441              - infra_sats: Estimated true infrastructure cost (real economic cost)
 442              - subsidy_from_api: api - subscription (Anthropic subsidizing via subscription)
 443              - subsidy_from_infra: infra - subscription (total subsidy you receive)
 444          """
 445          api_sats = self.calculate_api_equivalent_sats(usage)
 446          infra_sats = self.calculate_infra_cost_sats(usage)
 447  
 448          return {
 449              "subscription_sats": self.monthly_sats,
 450              "api_sats": api_sats,
 451              "infra_sats": infra_sats,
 452              "subsidy_from_api": max(0, api_sats - self.monthly_sats),
 453              "subsidy_from_infra": max(0, infra_sats - self.monthly_sats)
 454          }
 455  
 456      def calculate_efficiency(self, usage: dict) -> dict:
 457          """Calculate value extraction efficiency from subscription.
 458  
 459          Returns:
 460              dict with efficiency metrics across all three cost perspectives:
 461              - tokens_used: total tokens consumed
 462              - costs: { subscription, api, infra } sats
 463              - savings: { vs_api, vs_infra } sats saved
 464              - ratios: { api_efficiency, infra_efficiency } (higher = better value)
 465              - extraction_rate: tokens / target (are you using your subscription?)
 466          """
 467          total_tokens = (
 468              usage.get("inputTokens", 0) +
 469              usage.get("outputTokens", 0) +
 470              usage.get("cacheReadInputTokens", 0) +
 471              usage.get("cacheCreationInputTokens", 0)
 472          )
 473          all_costs = self.calculate_all_costs(usage)
 474  
 475          return {
 476              "tokens_used": total_tokens,
 477              "costs": {
 478                  "subscription_sats": all_costs["subscription_sats"],
 479                  "api_sats": all_costs["api_sats"],
 480                  "infra_sats": all_costs["infra_sats"]
 481              },
 482              "savings": {
 483                  "vs_api_sats": all_costs["subsidy_from_api"],
 484                  "vs_infra_sats": all_costs["subsidy_from_infra"]
 485              },
 486              "ratios": {
 487                  "api_efficiency": round(all_costs["api_sats"] / self.monthly_sats, 2) if self.monthly_sats > 0 else 0,
 488                  "infra_efficiency": round(all_costs["infra_sats"] / self.monthly_sats, 2) if self.monthly_sats > 0 else 0
 489              },
 490              "extraction_rate": round(total_tokens / self.target_tokens, 2) if self.target_tokens > 0 else 0
 491          }
 492  
 493      def get_current_snapshot(self) -> Optional[CostSnapshot]:
 494          """Get current cost snapshot."""
 495          usage = self.get_model_usage()
 496          if not usage:
 497              return None
 498  
 499          return CostSnapshot(
 500              timestamp=datetime.now().isoformat(),
 501              input_tokens=usage.get("inputTokens", 0),
 502              output_tokens=usage.get("outputTokens", 0),
 503              cache_read_tokens=usage.get("cacheReadInputTokens", 0),
 504              cache_creation_tokens=usage.get("cacheCreationInputTokens", 0),
 505              estimated_sats=self.calculate_api_equivalent_sats(usage)  # API equivalent for comparison
 506          )
 507  
 508      def get_delta(self) -> Optional[dict]:
 509          """Get delta since last snapshot."""
 510          current = self.get_current_snapshot()
 511          if not current:
 512              return None
 513  
 514          if not self.last_snapshot:
 515              self.last_snapshot = current
 516              return {
 517                  "delta_input_tokens": 0,
 518                  "delta_output_tokens": 0,
 519                  "delta_sats": 0,
 520                  "current": current.to_dict()
 521              }
 522  
 523          delta = {
 524              "delta_input_tokens": current.input_tokens - self.last_snapshot.input_tokens,
 525              "delta_output_tokens": current.output_tokens - self.last_snapshot.output_tokens,
 526              "delta_sats": current.estimated_sats - self.last_snapshot.estimated_sats,
 527              "current": current.to_dict(),
 528              "previous": self.last_snapshot.to_dict()
 529          }
 530  
 531          self.last_snapshot = current
 532          return delta
 533  
 534      def get_daily_usage(self, date: str = None) -> Optional[dict]:
 535          """Get usage for a specific date."""
 536          stats = self.read_stats_cache()
 537          if not stats or "dailyModelTokens" not in stats:
 538              return None
 539  
 540          if not date:
 541              date = datetime.now().strftime("%Y-%m-%d")
 542  
 543          for day in stats["dailyModelTokens"]:
 544              if day.get("date") == date:
 545                  return day.get("tokensByModel", {})
 546  
 547          return None
 548  
 549  
 550  # ═══════════════════════════════════════════════════════════════════════════════
 551  # INSIGHT DETECTION PATTERNS
 552  # ═══════════════════════════════════════════════════════════════════════════════
 553  
 554  @dataclass
 555  class DetectedInsight:
 556      """A detected insight from the transcript."""
 557      type: str  # principle, axiom_fix, protocol_change, delta_f, discovery
 558      content: str
 559      axioms_involved: List[str]
 560      importance: float  # 0-1
 561      timestamp: str
 562      context: str = ""
 563      energy_state: str = "unknown"  # potential | kinetic | transition
 564      divergence_score: float = 0.0  # D-score: how much this diverges from existing principles
 565  
 566      def to_dict(self) -> dict:
 567          return {
 568              "type": self.type,
 569              "content": self.content,
 570              "axioms_involved": self.axioms_involved,
 571              "importance": self.importance,
 572              "timestamp": self.timestamp,
 573              "context": self.context,
 574              "energy_state": self.energy_state,
 575              "divergence_score": self.divergence_score
 576          }
 577  
 578  
class InsightDetector:
    """Detects significant insights from Claude Code transcript.

    Table-driven: PATTERNS maps an insight type (DetectedInsight.type) to
    the regex list that detects it; ENERGY_KEYWORDS and AXIOM_KEYWORDS feed
    the keyword-count classifiers used by the helper methods.
    """

    # Patterns that indicate significant insights.
    # Applied by detect() with re.IGNORECASE | re.MULTILINE.
    PATTERNS = {
        "derived_principle": [
            r"new derived principle[:\s]+(.+)",
            r"\*\*Motion over position\*\*",
            r"derived principle from (A\d)",
            r"baked? into.+protocol",
        ],
        "axiom_fix": [
            r"ΔF\s*=\s*-(\d+\.?\d*)",  # Negative delta = improvement
            r"A(\d)\s+(?:violation|divergence).+(?:fixed|resolved|addressed)",
            r"axiom.+deviation.+→.+0\.\d+",
        ],
        "protocol_change": [
            r"Protocol v(\d+\.\d+)",
            r"updated?.+CLAUDE\.md",
            r"new.+mandatory.+protocol",
            r"rule.+is now.+mandatory",
        ],
        "significant_delta": [
            r"F:\s*(\d+\.?\d*)\s*→\s*(\d+\.?\d*)",  # F transition
            r"ΔF\s*=\s*([+-]?\d+\.?\d*)",
        ],
        "major_discovery": [
            r"insight[:\s]+(.{20,100})",
            r"humans evolved.+",
            r"savannah",  # Evolutionary insights
            r"this is.+major",
            r"key insight",
        ],
        "security_upgrade": [
            r"BIP-?39",
            r"PBKDF2",
            r"entropy.+bits",
            r"security upgrade",
        ],
        # Idea/proposal language: work not yet realized.
        "potential_energy": [
            r"want me to.+build",
            r"propose.+architecture",
            r"here'?s how.+would work",
            r"implementation option",
            r"design.+pattern",
            r"planning.+document",
            r"spec.+out",
            r"idea.+for",
        ],
        # Shipped/realized work.
        "kinetic_energy": [
            r"committed.+pushed",
            r"running.+daemon",
            r"deployed",
            r"shipped",
            r"created.+file",
            r"installed.+service",
            r"now.+operational",
            r"is.+working",
        ],
        # Work actively being converted from idea to artifact.
        "energy_transition": [
            r"let me build",
            r"building.+now",
            r"implementing",
            r"releasing",
            r"potential.+→.+kinetic",
        ],
    }

    # Energy state keywords for classification
    # (substring matches on the lowercased context around a hit).
    ENERGY_KEYWORDS = {
        "potential": [
            "could", "would", "should", "propose", "design", "plan", "idea",
            "architecture", "spec", "want me to", "option", "approach"
        ],
        "kinetic": [
            "built", "shipped", "deployed", "running", "created", "committed",
            "pushed", "installed", "working", "operational", "done", "complete"
        ],
        "transition": [
            "building", "implementing", "creating", "deploying", "shipping",
            "let me", "now i'll", "writing"
        ],
    }

    # Axiom keywords for classification (axiom id -> trigger words).
    AXIOM_KEYWORDS = {
        "A0": ["boundary", "markov blanket", "sovereign", "inside", "outside", "membrane"],
        "A1": ["integration", "connection", "binding", "isolation", "telos"],
        "A2": ["life", "death", "motion", "static", "primitive", "calcified", "ornament"],
        "A3": ["navigation", "pole", "dynamic", "tension", "dyad", "shadow"],
        "A4": ["ergodic", "ruin", "survival", "asymmetry", "catastrophic", "terminal"],
    }
 671  
 672      def __init__(self):
 673          self.seen_insights: Dict[str, float] = {}  # hash -> timestamp
 674          self.cooldown = 30.0  # seconds
 675  
 676      def detect(self, text: str) -> List[DetectedInsight]:
 677          """Detect insights from a chunk of transcript text."""
 678          insights = []
 679  
 680          for insight_type, patterns in self.PATTERNS.items():
 681              for pattern in patterns:
 682                  matches = re.finditer(pattern, text, re.IGNORECASE | re.MULTILINE)
 683                  for match in matches:
 684                      # Create insight
 685                      content = match.group(0)
 686  
 687                      # Check cooldown
 688                      content_hash = hashlib.md5(content.encode()).hexdigest()[:12]
 689                      if content_hash in self.seen_insights:
 690                          if time.time() - self.seen_insights[content_hash] < self.cooldown:
 691                              continue
 692  
 693                      # Detect axioms involved
 694                      axioms = self._detect_axioms(text[max(0, match.start()-200):match.end()+200])
 695  
 696                      # Calculate importance
 697                      importance = self._calculate_importance(insight_type, content, axioms)
 698  
 699                      # Detect energy state
 700                      energy_state = self._detect_energy_state(
 701                          insight_type,
 702                          text[max(0, match.start()-200):match.end()+200]
 703                      )
 704  
 705                      # Calculate divergence score (D)
 706                      context_for_d = text[max(0, match.start()-200):match.end()+200]
 707                      divergence = self._calculate_divergence(
 708                          insight_type, content, context_for_d, axioms, importance
 709                      )
 710  
 711                      insight = DetectedInsight(
 712                          type=insight_type,
 713                          content=content,
 714                          axioms_involved=axioms,
 715                          importance=importance,
 716                          timestamp=datetime.now().isoformat(),
 717                          context=text[max(0, match.start()-100):match.end()+100],
 718                          energy_state=energy_state,
 719                          divergence_score=divergence
 720                      )
 721  
 722                      insights.append(insight)
 723                      self.seen_insights[content_hash] = time.time()
 724  
 725          return insights
 726  
 727      def _detect_energy_state(self, insight_type: str, context: str) -> str:
 728          """Detect energy state: potential, kinetic, or transition."""
 729          context_lower = context.lower()
 730  
 731          # Type-based hints
 732          if insight_type in ["potential_energy"]:
 733              return "potential"
 734          if insight_type in ["kinetic_energy"]:
 735              return "kinetic"
 736          if insight_type in ["energy_transition"]:
 737              return "transition"
 738  
 739          # Keyword-based detection
 740          potential_score = sum(1 for kw in self.ENERGY_KEYWORDS["potential"] if kw in context_lower)
 741          kinetic_score = sum(1 for kw in self.ENERGY_KEYWORDS["kinetic"] if kw in context_lower)
 742          transition_score = sum(1 for kw in self.ENERGY_KEYWORDS["transition"] if kw in context_lower)
 743  
 744          # Transition takes precedence (actively converting)
 745          if transition_score >= 2:
 746              return "transition"
 747  
 748          # Then check kinetic vs potential
 749          if kinetic_score > potential_score:
 750              return "kinetic"
 751          if potential_score > kinetic_score:
 752              return "potential"
 753  
 754          # Default based on insight type
 755          kinetic_types = ["protocol_change", "security_upgrade", "axiom_fix"]
 756          if insight_type in kinetic_types:
 757              return "kinetic"
 758  
 759          return "potential"  # Ideas default to potential
 760  
 761      def _detect_axioms(self, context: str) -> List[str]:
 762          """Detect which axioms are involved based on keywords."""
 763          axioms = []
 764          context_lower = context.lower()
 765  
 766          for axiom, keywords in self.AXIOM_KEYWORDS.items():
 767              if any(kw in context_lower for kw in keywords):
 768                  axioms.append(axiom)
 769  
 770          # Also check for explicit A0-A4 mentions
 771          for match in re.finditer(r'\bA([0-4])\b', context):
 772              axiom = f"A{match.group(1)}"
 773              if axiom not in axioms:
 774                  axioms.append(axiom)
 775  
 776          return sorted(axioms)
 777  
 778      def _calculate_importance(self, insight_type: str, content: str, axioms: List[str]) -> float:
 779          """Calculate importance score 0-1."""
 780          base_scores = {
 781              "derived_principle": 0.9,
 782              "protocol_change": 0.85,
 783              "security_upgrade": 0.8,
 784              "axiom_fix": 0.7,
 785              "significant_delta": 0.6,
 786              "major_discovery": 0.75,
 787          }
 788  
 789          score = base_scores.get(insight_type, 0.5)
 790  
 791          # Boost for multiple axioms involved
 792          score += len(axioms) * 0.05
 793  
 794          # Boost for certain keywords
 795          if "mandatory" in content.lower():
 796              score += 0.1
 797          if "savannah" in content.lower() or "evolved" in content.lower():
 798              score += 0.15
 799  
 800          return min(1.0, score)
 801  
 802      def _calculate_divergence(self, insight_type: str, content: str, context: str,
 803                                axioms: List[str], importance: float) -> float:
 804          """
 805          Calculate divergence score (D) 0-1.
 806  
 807          D-score measures how much an insight diverges from existing principles.
 808          High D (>0.5) suggests a potential new principle candidate.
 809  
 810          Factors:
 811          - High importance + few axiom matches = novel insight
 812          - Contains principle-like language ("should", "must", "always")
 813          - Insight types that suggest principles
 814          - Novel concepts not covered by axiom keywords
 815          """
 816          d_score = 0.0
 817          content_lower = content.lower()
 818          context_lower = context.lower() if context else ""
 819  
 820          # Factor 1: High importance with low axiom coverage (novel insight)
 821          # If important but doesn't match existing axioms, it might be new
 822          if importance >= 0.7 and len(axioms) <= 1:
 823              d_score += 0.25
 824          elif importance >= 0.6 and len(axioms) == 0:
 825              d_score += 0.30
 826  
 827          # Factor 2: Principle-like language patterns
 828          principle_patterns = [
 829              r"\bshould\b.*\b(always|never)\b",
 830              r"\bmust\b.*\b(be|have|do)\b",
 831              r"\b(always|never)\b.*\b(do|should|must)\b",
 832              r"\bprinciple\b",
 833              r"\brule\b.*\b(is|should)\b",
 834              r"\baxiom\b",
 835              r"\bthe key is\b",
 836              r"\bfundamental\b",
 837              r"\binvariant\b",
 838              r"\bconstraint\b",
 839          ]
 840          principle_matches = sum(
 841              1 for p in principle_patterns
 842              if re.search(p, content_lower) or re.search(p, context_lower)
 843          )
 844          d_score += min(0.25, principle_matches * 0.08)
 845  
 846          # Factor 3: Insight types that suggest principles
 847          principle_types = {
 848              "derived_principle": 0.35,  # Explicitly about principles
 849              "major_discovery": 0.20,    # Discoveries often become principles
 850              "protocol_change": 0.15,    # Protocol changes embody principles
 851          }
 852          d_score += principle_types.get(insight_type, 0.0)
 853  
 854          # Factor 4: Contains generalizable statements
 855          generalizable_patterns = [
 856              r"\bevery\b.*\b(system|process|agent)\b",
 857              r"\ball\b.*\b(should|must|need)\b",
 858              r"\bwhenever\b",
 859              r"\bin general\b",
 860              r"\bas a rule\b",
 861              r"\bby default\b",
 862              r"\bthe pattern is\b",
 863          ]
 864          generalizable_matches = sum(
 865              1 for p in generalizable_patterns
 866              if re.search(p, content_lower) or re.search(p, context_lower)
 867          )
 868          d_score += min(0.15, generalizable_matches * 0.05)
 869  
 870          # Factor 5: Novel domain concepts (not in axiom keywords)
 871          # Check if content contains substantial concepts not in our axiom vocabulary
 872          all_axiom_words = set()
 873          for words in self.AXIOM_KEYWORDS.values():
 874              all_axiom_words.update(w.lower() for w in words)
 875  
 876          content_words = set(re.findall(r'\b[a-z]{5,}\b', content_lower))
 877          novel_words = content_words - all_axiom_words - {
 878              'about', 'which', 'would', 'could', 'should', 'where', 'there',
 879              'their', 'these', 'those', 'being', 'having', 'doing', 'using',
 880              'claude', 'system', 'instance', 'session', 'context', 'token',
 881          }
 882          if len(novel_words) >= 5:
 883              d_score += 0.10
 884  
 885          return min(1.0, d_score)
 886  
 887  
 888  # ═══════════════════════════════════════════════════════════════════════════════
 889  # TRANSCRIPT WATCHER
 890  # ═══════════════════════════════════════════════════════════════════════════════
 891  
 892  class TranscriptWatcher:
 893      """Watches Claude Code transcript files for changes."""
 894  
 895      def __init__(self, config: LocalFOConfig):
 896          self.config = config
 897          self.detector = InsightDetector()
 898          self.file_positions: Dict[str, int] = {}  # Track read position per file
 899          self.active_session: Optional[str] = None
 900  
 901      def find_active_transcript(self) -> Optional[Path]:
 902          """Find the most recently modified transcript file."""
 903          if not self.config.claude_projects_dir.exists():
 904              return None
 905  
 906          transcripts = []
 907          for project_dir in self.config.claude_projects_dir.iterdir():
 908              if project_dir.is_dir():
 909                  for jsonl in project_dir.glob("*.jsonl"):
 910                      transcripts.append((jsonl, jsonl.stat().st_mtime))
 911  
 912          if not transcripts:
 913              return None
 914  
 915          # Return most recently modified
 916          transcripts.sort(key=lambda x: x[1], reverse=True)
 917          return transcripts[0][0]
 918  
 919      def read_new_content(self, transcript_path: Path) -> str:
 920          """Read new content from transcript since last read."""
 921          path_str = str(transcript_path)
 922  
 923          # Get current position
 924          current_pos = self.file_positions.get(path_str, 0)
 925  
 926          try:
 927              with open(transcript_path, 'r') as f:
 928                  f.seek(current_pos)
 929                  new_content = f.read()
 930                  self.file_positions[path_str] = f.tell()
 931                  return new_content
 932          except Exception as e:
 933              print(f"Error reading transcript: {e}")
 934              return ""
 935  
 936      def extract_assistant_content(self, jsonl_content: str) -> str:
 937          """Extract assistant messages from JSONL transcript."""
 938          text_parts = []
 939  
 940          for line in jsonl_content.strip().split('\n'):
 941              if not line:
 942                  continue
 943              try:
 944                  entry = json.loads(line)
 945                  # Look for assistant messages
 946                  if entry.get('type') == 'assistant':
 947                      message = entry.get('message', {})
 948                      content = message.get('content', [])
 949                      if isinstance(content, list):
 950                          for block in content:
 951                              if isinstance(block, dict) and block.get('type') == 'text':
 952                                  text_parts.append(block.get('text', ''))
 953                      elif isinstance(content, str):
 954                          text_parts.append(content)
 955              except json.JSONDecodeError:
 956                  continue
 957  
 958          return '\n'.join(text_parts)
 959  
 960  
 961  # ═══════════════════════════════════════════════════════════════════════════════
 962  # NODEBOX PUSHER
 963  # ═══════════════════════════════════════════════════════════════════════════════
 964  
 965  class NodeboxPusher:
 966      """Pushes insights to nodebox Hypercore daemon."""
 967  
 968      def __init__(self, config: LocalFOConfig):
 969          self.config = config
 970  
 971      def push_insight(self, insight: DetectedInsight) -> bool:
 972          """Push a single insight to nodebox."""
 973          url = f"{self.config.nodebox_url}/insight"
 974  
 975          payload = json.dumps({
 976              "source": "local-fo",
 977              "machine": os.uname().nodename,
 978              "insight": insight.to_dict()
 979          }).encode('utf-8')
 980  
 981          try:
 982              req = urllib.request.Request(
 983                  url,
 984                  data=payload,
 985                  headers={'Content-Type': 'application/json'},
 986                  method='POST'
 987              )
 988              with urllib.request.urlopen(req, timeout=5) as resp:
 989                  return resp.status == 200
 990          except urllib.error.URLError as e:
 991              print(f"  Warning: Could not reach nodebox: {e}")
 992              return False
 993          except Exception as e:
 994              print(f"  Warning: Push failed: {e}")
 995              return False
 996  
 997      def check_nodebox(self) -> bool:
 998          """Check if nodebox is reachable."""
 999          try:
1000              url = f"{self.config.nodebox_url}/status"
1001              with urllib.request.urlopen(url, timeout=3) as resp:
1002                  return resp.status == 200
1003          except:
1004              return False
1005  
1006  
1007  # ═══════════════════════════════════════════════════════════════════════════════
1008  # LOCAL STATE UPDATER
1009  # ═══════════════════════════════════════════════════════════════════════════════
1010  
class LocalStateUpdater:
    """Updates local Sovereign OS state files.

    Two outputs are maintained:
    - FO-STATE.json: machine-readable tallies plus a rolling insight list.
    - LIVE-COMPRESSION.md: human-readable sections upserted in place.
    """

    def __init__(self, config: LocalFOConfig):
        self.config = config

    def update_fo_state(self, insights: List[DetectedInsight]):
        """Merge new insights into FO-STATE.json (tallies, rolling list, heartbeat)."""
        state_path = self.config.fo_state_path

        # Load existing state; on any failure (missing/corrupt file) fall
        # back to an empty skeleton -- this path must never kill the daemon.
        state = {"axiom_activity": {}, "resonances": [], "insights": [], "energy": {}}
        if state_path.exists():
            try:
                with open(state_path, encoding='utf-8') as f:
                    state = json.load(f)
            except Exception:
                pass  # deliberate best-effort: rebuild from the skeleton

        # Tally per-axiom activity.
        if "axiom_activity" not in state:
            state["axiom_activity"] = {"A0": 0, "A1": 0, "A2": 0, "A3": 0, "A4": 0}
        for insight in insights:
            for axiom in insight.axioms_involved:
                state["axiom_activity"][axiom] = state["axiom_activity"].get(axiom, 0) + 1

        # Tally energy states (unknown states are ignored).
        if "energy" not in state:
            state["energy"] = {"potential": 0, "kinetic": 0, "transition": 0}
        for insight in insights:
            energy = insight.energy_state
            if energy in state["energy"]:
                state["energy"][energy] = state["energy"].get(energy, 0) + 1

        # Append serialized insights, keeping only the most recent 50.
        if "insights" not in state:
            state["insights"] = []
        state["insights"].extend(insight.to_dict() for insight in insights)
        state["insights"] = state["insights"][-50:]

        # Single timestamp so last_insight and last_heartbeat agree exactly.
        now = datetime.now().isoformat()
        state["last_insight"] = now
        state["last_heartbeat"] = now

        with open(state_path, 'w', encoding='utf-8') as f:
            json.dump(state, f, indent=2)

    def _upsert_lc_section(self, section: str, marker: str, replace_pattern: str,
                           insert_before: List[str], flags: int = 0):
        """Insert or replace one section of LIVE-COMPRESSION.md.

        If *marker* already appears in the file, the span matching
        *replace_pattern* is replaced with *section*; otherwise *section* is
        inserted before the first heading in *insert_before* that exists,
        or appended at the end of the file.
        """
        lc_path = self.config.live_compression_path

        existing = ""
        if lc_path.exists():
            with open(lc_path, encoding='utf-8') as f:
                existing = f.read()

        if marker in existing:
            # Pass the replacement as a callable: generated sections can
            # contain backslashes, which a plain string repl would treat as
            # regex group references (crashing or corrupting the output).
            # The leading newline is stripped so repeated updates do not
            # accumulate blank lines above the heading.
            replacement = section.lstrip("\n")
            updated = re.sub(replace_pattern, lambda _m: replacement, existing, flags=flags)
        else:
            for heading in insert_before:
                if heading in existing:
                    updated = existing.replace(heading, f"{section}\n{heading}")
                    break
            else:
                updated = existing + "\n" + section

        # UTF-8 explicitly: sections contain emoji, regardless of locale.
        with open(lc_path, 'w', encoding='utf-8') as f:
            f.write(updated)

    def update_live_compression(self, insights: List[DetectedInsight]):
        """Upsert the "Real-Time Insights" section of LIVE-COMPRESSION.md."""
        section = "\n## Real-Time Insights (Local FO)\n\n"
        for insight in insights:
            energy_icon = {"potential": "⚡", "kinetic": "🚀", "transition": "🔄"}.get(insight.energy_state, "❓")
            section += f"- **[{insight.timestamp[:19]}]** {energy_icon} ({insight.type}) "
            section += f"{insight.content[:100]}..."
            if insight.axioms_involved:
                section += f" [{', '.join(insight.axioms_involved)}]"
            section += f" | {insight.energy_state} | importance: {insight.importance:.2f}\n"

        self._upsert_lc_section(
            section,
            marker="## Real-Time Insights (Local FO)",
            replace_pattern=r"## Real-Time Insights \(Local FO\)\n\n(?:- .+\n)*",
            insert_before=["## Resurrection Seed"],
        )

    def update_live_compression_costs(self, costs: dict, session_hours: Optional[float] = None):
        """Upsert the "Session Economics" section of LIVE-COMPRESSION.md.

        *costs* is the dict stored under FO-STATE.json["costs"]; when
        *session_hours* is given, a per-session attention-cost block is added.
        """
        sats = costs.get("sats", {})
        usd = costs.get("usd", {})
        eff = costs.get("efficiency", {})

        section = "\n## Session Economics (Bitcoin-Anchored)\n\n"
        section += f"*Updated: {costs.get('updated', 'unknown')[:19]}*\n\n"
        section += "```\n"

        if session_hours:
            # Value of the operator's attention for this session.
            sats_per_hour = 172110  # Default, should load from config
            attention_sats = int(session_hours * sats_per_hour)
            attention_usd = round(attention_sats / 100_000_000 * 100000, 2)
            section += f"SESSION ({session_hours:.1f} hrs):\n"
            section += f"  Attention:     {attention_sats:,} sats (${attention_usd:,.2f})\n"
            section += "  Claude:        ~3,333 sats ($3.33)\n"
            section += f"  Total:         {attention_sats + 3333:,} sats (${attention_usd + 3.33:,.2f})\n\n"

        section += "CLAUDE (All-Time):\n"
        section += f"  Subscription:  {sats.get('subscription_sats', 0):,} sats (${usd.get('subscription', 0):.2f})\n"
        section += f"  API value:     {sats.get('api_sats', 0):,} sats (${usd.get('api', 0):.2f})\n"
        section += f"  Infra value:   {sats.get('infra_sats', 0):,} sats (${usd.get('infra', 0):.2f})\n"
        section += f"  Efficiency:    {eff.get('api_efficiency', 0)}x API | {eff.get('infra_efficiency', 0)}x infra\n"
        section += "```\n"

        self._upsert_lc_section(
            section,
            marker="## Session Economics (Bitcoin-Anchored)",
            # Match through BOTH opening AND closing backtick fences.
            replace_pattern=r"## Session Economics \(Bitcoin-Anchored\)\n\n.*?```\n.*?```\n",
            insert_before=["## Real-Time Insights (Local FO)", "## Resurrection Seed"],
            flags=re.DOTALL,
        )
1157  
1158  
1159  # ═══════════════════════════════════════════════════════════════════════════════
1160  # MAIN DAEMON
1161  # ═══════════════════════════════════════════════════════════════════════════════
1162  
1163  class LocalFirstOfficer:
1164      """Main daemon class."""
1165  
    def __init__(self, config: LocalFOConfig):
        """Wire together the watcher, pushers, and state updaters for one daemon run."""
        self.config = config
        self.watcher = TranscriptWatcher(config)  # tails Claude Code transcripts
        self.pusher = NodeboxPusher(config)  # legacy HTTP push to nodebox
        self.updater = LocalStateUpdater(config)  # FO-STATE.json / LIVE-COMPRESSION.md writer
        self.cost_tracker = CostTracker(config)  # Bitcoin-anchored token cost tracking
        self.running = False  # main-loop flag; set True in run()
        # Periodic-task timers below are epoch seconds (compared against time.time()).
        self.last_cost_update = 0  # Track when we last updated costs
        self.last_auto_vote = 0    # Track when we last ran auto-vote
        self.last_claude_md_check = 0  # Track when we last checked for confirmed principles
        self.nodebox_reachable = False  # Set at startup, skip pushing if False
        self.mesh_reachable = False  # Set at startup, for silence-led context buffer
        self.recent_insights = []  # Last few insights for mesh publishing
1179  
    def print_banner(self):
        """Print the startup banner, watch paths, and a cost-tracking summary."""
        print("""
╔══════════════════════════════════════════════════════════════════════════╗
║                                                                          ║
║   ███████╗██╗██████╗ ███████╗████████╗     ██████╗ ███████╗███████╗      ║
║   ██╔════╝██║██╔══██╗██╔════╝╚══██╔══╝    ██╔═══██╗██╔════╝██╔════╝      ║
║   █████╗  ██║██████╔╝███████╗   ██║       ██║   ██║█████╗  █████╗        ║
║   ██╔══╝  ██║██╔══██╗╚════██║   ██║       ██║   ██║██╔══╝  ██╔══╝        ║
║   ██║     ██║██║  ██║███████║   ██║       ╚██████╔╝██║     ██║           ║
║   ╚═╝     ╚═╝╚═╝  ╚═╝╚══════╝   ╚═╝        ╚═════╝ ╚═╝     ╚═╝           ║
║                                                                          ║
║                    LOCAL FIRST OFFICER DAEMON                            ║
║                    Real-time insight detection                           ║
║                                                                          ║
╚══════════════════════════════════════════════════════════════════════════╝
""")
        print(f"  Watching:  {self.config.claude_projects_dir}")
        print(f"  Nodebox:   {self.config.nodebox_url}")
        print(f"  Local:     {self.config.sovereign_os_dir}")
        print()

        # Show cost tracking status (only when token usage data is available).
        usage = self.cost_tracker.get_model_usage()
        if usage:
            efficiency = self.cost_tracker.calculate_efficiency(usage)
            fmt = CostTracker.format_sats_usd  # renders "N sats ($X.XX)" style strings
            print("  ₿ COST TRACKING (Bitcoin-anchored @ $100k BTC)")
            print(f"     Subscription: {fmt(efficiency['costs']['subscription_sats'])}/month (fixed)")
            print(f"     API value:    {fmt(efficiency['costs']['api_sats'])} (if on API)")
            print(f"     Infra value:  {fmt(efficiency['costs']['infra_sats'])} (true cost)")
            print(f"     Savings:      {fmt(efficiency['savings']['vs_infra_sats'])} vs infrastructure")
            print(f"     Efficiency:   {efficiency['ratios']['api_efficiency']}x (API) / {efficiency['ratios']['infra_efficiency']}x (infra)")
            print(f"     Tokens used:  {efficiency['tokens_used']:,}")
            print()
1215  
1216      def run(self):
1217          """Main daemon loop."""
1218          self.print_banner()
1219          self.running = True
1220  
1221          # Check nodebox connectivity
1222          self.nodebox_reachable = self.pusher.check_nodebox()
1223          if self.nodebox_reachable:
1224              print("  ✓ Nodebox reachable")
1225          else:
1226              print("  ⚠ Nodebox not reachable - running in local-only mode (no further warnings)")
1227  
1228          # Check mesh connectivity (for silence-led context buffer)
1229          self.mesh_reachable = check_mesh_available()
1230          if self.mesh_reachable:
1231              print("  ✓ Mesh reachable (silence-led context buffer active)")
1232              print("  ✓ Auto-vote enabled (will vote on principle candidates)")
1233              print("  ✓ CLAUDE.md writer enabled (confirmed principles → protocol)")
1234          else:
1235              print("  ⚠ Mesh not reachable - cross-instance context disabled")
1236              print("  ⚠ Auto-vote disabled")
1237              print("  ⚠ CLAUDE.md writer disabled")
1238  
1239          print()
1240          print("  Listening for insights... (Ctrl+C to stop)")
1241          print("  ─────────────────────────────────────────")
1242          print()
1243  
1244          while self.running:
1245              try:
1246                  self._check_cycle()
1247                  time.sleep(self.config.poll_interval)
1248              except KeyboardInterrupt:
1249                  print("\n\nShutting down...")
1250                  break
1251              except Exception as e:
1252                  print(f"Error in cycle: {e}")
1253                  time.sleep(5)
1254  
    def _check_cycle(self):
        """Single check cycle.

        Runs the periodic maintenance tasks (each on its own timer), then
        tails the most recently active transcript for new assistant output
        and processes any detected insights. Called repeatedly from run();
        exceptions propagate to run()'s catch-and-retry handler.
        """
        now = time.time()

        # Update costs every 30 seconds
        if now - self.last_cost_update > 30:
            self._update_costs()
            self.last_cost_update = now

        # Auto-vote on principle candidates every 60 seconds
        if now - self.last_auto_vote > 60:
            self._auto_vote_on_principles()
            self.last_auto_vote = now

        # Check for confirmed principles and write to CLAUDE.md every 5 minutes
        if now - self.last_claude_md_check > 300:
            self._write_confirmed_principles_to_claude_md()
            self.last_claude_md_check = now

        # Find active transcript (most recently modified .jsonl); nothing to
        # do when no transcript exists yet.
        transcript = self.watcher.find_active_transcript()
        if not transcript:
            return

        # Check if session changed (transcript filename stem is the session id)
        session_id = transcript.stem
        if session_id != self.watcher.active_session:
            print(f"  📂 New session: {session_id[:20]}...")
            self.watcher.active_session = session_id
            # Reset cost tracking for new session
            self.cost_tracker.session_start_snapshot = self.cost_tracker.get_current_snapshot()

        # Read new content appended since the last poll (tail semantics)
        new_content = self.watcher.read_new_content(transcript)
        if not new_content:
            return

        # Extract assistant text from the JSONL lines
        assistant_text = self.watcher.extract_assistant_content(new_content)
        if not assistant_text:
            return

        # Detect insights (detector applies its own dedupe/cooldown)
        insights = self.watcher.detector.detect(assistant_text)
        if not insights:
            return

        # Process each insight
        for insight in insights:
            self._process_insight(insight)
1305  
1306      def _update_costs(self):
1307          """Update cost tracking in FO-STATE.json."""
1308          usage = self.cost_tracker.get_model_usage()
1309          if not usage:
1310              return
1311  
1312          efficiency = self.cost_tracker.calculate_efficiency(usage)
1313          snapshot = self.cost_tracker.get_current_snapshot()
1314  
1315          # Load FO state
1316          state_path = self.config.fo_state_path
1317          state = {}
1318          if state_path.exists():
1319              try:
1320                  with open(state_path) as f:
1321                      state = json.load(f)
1322              except:
1323                  pass
1324  
1325          # Update cost tracking with both sats and USD
1326          to_usd = CostTracker.sats_to_usd
1327          state["costs"] = {
1328              "updated": datetime.now().isoformat(),
1329              "btc_price_usd": CostTracker.BTC_PRICE_USD,
1330              "model": "claude-opus-4-5-20251101",
1331              "tokens": {
1332                  "input": usage.get("inputTokens", 0),
1333                  "output": usage.get("outputTokens", 0),
1334                  "cache_read": usage.get("cacheReadInputTokens", 0),
1335                  "cache_create": usage.get("cacheCreationInputTokens", 0),
1336                  "total": efficiency["tokens_used"]
1337              },
1338              "sats": efficiency["costs"],
1339              "usd": {
1340                  "subscription": to_usd(efficiency["costs"]["subscription_sats"]),
1341                  "api": to_usd(efficiency["costs"]["api_sats"]),
1342                  "infra": to_usd(efficiency["costs"]["infra_sats"])
1343              },
1344              "savings": {
1345                  "vs_api_sats": efficiency["savings"]["vs_api_sats"],
1346                  "vs_api_usd": to_usd(efficiency["savings"]["vs_api_sats"]),
1347                  "vs_infra_sats": efficiency["savings"]["vs_infra_sats"],
1348                  "vs_infra_usd": to_usd(efficiency["savings"]["vs_infra_sats"])
1349              },
1350              "efficiency": efficiency["ratios"],
1351              "extraction_rate": efficiency["extraction_rate"]
1352          }
1353  
1354          # Add session delta if available
1355          if self.cost_tracker.session_start_snapshot and snapshot:
1356              state["costs"]["session_delta"] = {
1357                  "input_tokens": snapshot.input_tokens - self.cost_tracker.session_start_snapshot.input_tokens,
1358                  "output_tokens": snapshot.output_tokens - self.cost_tracker.session_start_snapshot.output_tokens,
1359                  "api_sats": snapshot.estimated_sats - self.cost_tracker.session_start_snapshot.estimated_sats
1360              }
1361  
1362          # Write back
1363          with open(state_path, 'w') as f:
1364              json.dump(state, f, indent=2)
1365  
1366          # Also update LIVE-COMPRESSION.md with costs
1367          self.updater.update_live_compression_costs(state["costs"])
1368  
1369          # Publish FO state to mesh for silence-led context buffer
1370          if self.mesh_reachable:
1371              # Extract gravity wells from recent insights (topic extraction)
1372              gravity_wells = self._extract_gravity_wells()
1373  
1374              # Store gravity wells in FO state for persistence
1375              state["gravity_wells"] = gravity_wells
1376  
1377              # Re-save with gravity wells
1378              with open(state_path, 'w') as f:
1379                  json.dump(state, f, indent=2)
1380  
1381              axiom_activity = state.get("axiom_activity", {})
1382              energy = state.get("energy", {})
1383  
1384              mesh_state = {
1385                  "sessionId": self.watcher.active_session,
1386                  "gravityWells": gravity_wells[:5] if gravity_wells else [],
1387                  "axiomActivity": axiom_activity,
1388                  "energyState": energy,
1389                  "recentInsights": [
1390                      {"type": i.type, "content": i.content[:100]}
1391                      for i in self.recent_insights[-3:]
1392                  ]
1393              }
1394              publish_fo_state_to_mesh(mesh_state)
1395  
1396      def _process_insight(self, insight: DetectedInsight):
1397          """Process a detected insight."""
1398          # Print to console
1399          importance_bar = "█" * int(insight.importance * 10) + "░" * (10 - int(insight.importance * 10))
1400          divergence_bar = "▓" * int(insight.divergence_score * 10) + "░" * (10 - int(insight.divergence_score * 10))
1401          energy_icon = {"potential": "⚡", "kinetic": "🚀", "transition": "🔄"}.get(insight.energy_state, "❓")
1402          print(f"  {energy_icon} [{insight.type}] I:{importance_bar} {insight.importance:.2f} | D:{divergence_bar} {insight.divergence_score:.2f}")
1403          print(f"     {insight.content[:80]}...")
1404          if insight.axioms_involved:
1405              print(f"     Axioms: {', '.join(insight.axioms_involved)}")
1406          print(f"     Energy: {insight.energy_state}")
1407  
1408          # Update local state
1409          self.updater.update_fo_state([insight])
1410          self.updater.update_live_compression([insight])
1411  
1412          # Store for mesh publishing (keep last 10)
1413          self.recent_insights.append(insight)
1414          self.recent_insights = self.recent_insights[-10:]
1415  
1416          # Push to nodebox (legacy) - only if reachable at startup
1417          if self.nodebox_reachable and self.pusher.push_insight(insight):
1418              print(f"     → Pushed to nodebox")
1419  
1420          # Publish to mesh (N of X - all instances receive this)
1421          mesh_published = publish_to_mesh("fo_insight", {
1422              "type": insight.type,
1423              "content": insight.content,
1424              "axioms": insight.axioms_involved,
1425              "importance": insight.importance,
1426              "energy_state": insight.energy_state,
1427              "divergence_score": insight.divergence_score
1428          })
1429          if mesh_published:
1430              print(f"     → Published to mesh")
1431  
1432          # HIGH-IMPORTANCE INSIGHTS: Broadcast as Aha Moment (importance >= 0.7)
1433          # These get priority placement in all Claude instance bootstraps
1434          if insight.importance >= 0.7:
1435              aha_published = publish_aha_moment({
1436                  "from": socket.gethostname(),
1437                  "content": insight.content,
1438                  "type": insight.type,
1439                  "axioms": insight.axioms_involved,
1440                  "importance": insight.importance,
1441                  "energy_state": insight.energy_state,
1442                  "session_id": self.watcher.active_session,
1443              })
1444              if aha_published:
1445                  print(f"     🎯 AHA MOMENT broadcast to all instances!")
1446  
1447          # HIGH-DIVERGENCE INSIGHTS: Auto-publish as Principle Candidate (D > 0.5)
1448          # These might represent new principles to be tested across the mesh
1449          if insight.divergence_score > 0.5:
1450              # Generate principle name from content
1451              principle_name = self._generate_principle_name(insight)
1452  
1453              principle_published = publish_principle_candidate({
1454                  "from": socket.gethostname(),
1455                  "name": principle_name,
1456                  "statement": insight.content[:200],
1457                  "rationale": f"Auto-discovered from {insight.type} insight with D={insight.divergence_score:.2f}",
1458                  "evidence": [insight.context[:300]] if insight.context else [],
1459                  "axiom_connections": insight.axioms_involved,
1460                  "divergence_score": insight.divergence_score,
1461                  "proposed_by": f"{socket.gethostname()}/{self.watcher.active_session[:20] if self.watcher.active_session else 'unknown'}",
1462              })
1463              if principle_published:
1464                  print(f"     📜 PRINCIPLE CANDIDATE auto-published: {principle_name}")
1465                  print(f"        D={insight.divergence_score:.2f} > 0.5 threshold")
1466  
1467          print()
1468  
1469      def _extract_gravity_wells(self) -> List[str]:
1470          """
1471          Extract gravity wells (hot topics) from recent insights.
1472  
1473          Gravity wells are topics that appear frequently in recent insights,
1474          indicating where the session's attention is focused.
1475          """
1476          if not self.recent_insights:
1477              return []
1478  
1479          # Count word frequencies across recent insights
1480          word_counts: Dict[str, int] = {}
1481          stopwords = {
1482              'about', 'which', 'would', 'could', 'should', 'where', 'there',
1483              'their', 'these', 'those', 'being', 'having', 'doing', 'using',
1484              'claude', 'system', 'every', 'always', 'never', 'before', 'after',
1485              'content', 'insight', 'principle', 'pattern', 'axiom', 'delta',
1486              'protocol', 'session', 'instance', 'context', 'token', 'message',
1487          }
1488  
1489          for insight in self.recent_insights[-20:]:  # Last 20 insights
1490              content = insight.content.lower()
1491              words = re.findall(r'\b[a-z]{5,}\b', content)
1492              for word in words:
1493                  if word not in stopwords:
1494                      word_counts[word] = word_counts.get(word, 0) + 1
1495  
1496          # Also count axioms as topics
1497          for insight in self.recent_insights[-20:]:
1498              for axiom in insight.axioms_involved:
1499                  word_counts[axiom] = word_counts.get(axiom, 0) + 2  # Axioms weighted 2x
1500  
1501          # Return top 10 topics by frequency (minimum 2 occurrences)
1502          sorted_topics = sorted(
1503              [(word, count) for word, count in word_counts.items() if count >= 2],
1504              key=lambda x: x[1],
1505              reverse=True
1506          )
1507  
1508          return [topic for topic, _ in sorted_topics[:10]]
1509  
1510      def _generate_principle_name(self, insight: DetectedInsight) -> str:
1511          """Generate a short principle name from insight content."""
1512          content = insight.content.lower()
1513  
1514          # Extract key concepts (nouns/verbs 5+ chars)
1515          words = re.findall(r'\b[a-z]{5,}\b', content)
1516          # Filter common words
1517          stopwords = {
1518              'about', 'which', 'would', 'could', 'should', 'where', 'there',
1519              'their', 'these', 'those', 'being', 'having', 'doing', 'using',
1520              'claude', 'system', 'every', 'always', 'never', 'before', 'after',
1521          }
1522          key_words = [w for w in words if w not in stopwords][:3]
1523  
1524          if key_words:
1525              # Create name like "A5-Word1-Word2"
1526              next_axiom = f"A{5 + len(insight.axioms_involved)}"  # Heuristic
1527              name_parts = [w.capitalize() for w in key_words]
1528              return f"{next_axiom}-{'-'.join(name_parts)}"
1529          else:
1530              # Fallback
1531              timestamp = datetime.now().strftime("%H%M")
1532              return f"A5-Candidate-{timestamp}"
1533  
1534      # ═══════════════════════════════════════════════════════════════════════════════
1535      # AUTO-VOTE ON PRINCIPLE CANDIDATES
1536      # ═══════════════════════════════════════════════════════════════════════════════
1537  
1538      def _evaluate_principle_evidence(self, principle: Dict[str, Any]) -> Dict[str, Any]:
1539          """
1540          Evaluate local evidence for/against a principle candidate.
1541  
1542          Checks recent insights for:
1543          - Keyword overlap with principle statement
1544          - Axiom alignment
1545          - Pattern type correlation
1546  
1547          Returns:
1548              {
1549                  "support_score": 0-1,
1550                  "reject_score": 0-1,
1551                  "evidence": [list of supporting/contradicting insights],
1552                  "reason": "explanation"
1553              }
1554          """
1555          statement = principle.get("statement", "").lower()
1556          principle_axioms = set(principle.get("axiom_connections", []))
1557          principle_name = principle.get("name", "")
1558  
1559          # Extract key words from principle statement
1560          principle_words = set(re.findall(r'\b[a-z]{5,}\b', statement))
1561          stopwords = {
1562              'about', 'which', 'would', 'could', 'should', 'where', 'there',
1563              'their', 'these', 'those', 'being', 'having', 'doing', 'using',
1564              'system', 'every', 'always', 'never', 'before', 'after',
1565          }
1566          principle_words -= stopwords
1567  
1568          support_evidence = []
1569          reject_evidence = []
1570  
1571          for insight in self.recent_insights:
1572              insight_content = insight.content.lower()
1573              insight_words = set(re.findall(r'\b[a-z]{5,}\b', insight_content))
1574              insight_axioms = set(insight.axioms_involved)
1575  
1576              # Calculate word overlap
1577              word_overlap = len(principle_words & insight_words)
1578  
1579              # Calculate axiom alignment
1580              axiom_overlap = len(principle_axioms & insight_axioms)
1581  
1582              # High overlap = supporting evidence
1583              if word_overlap >= 2 or axiom_overlap >= 1:
1584                  # Check if insight supports or contradicts
1585                  # High importance + similar topic = support
1586                  if insight.importance >= 0.6:
1587                      support_evidence.append({
1588                          "content": insight.content[:100],
1589                          "word_overlap": word_overlap,
1590                          "axiom_overlap": axiom_overlap,
1591                          "importance": insight.importance
1592                      })
1593                  # Low importance on same topic might indicate the principle
1594                  # isn't universally applicable
1595                  elif insight.importance < 0.4 and word_overlap >= 3:
1596                      reject_evidence.append({
1597                          "content": insight.content[:100],
1598                          "reason": "low-importance counterexample"
1599                      })
1600  
1601          # Calculate scores
1602          support_score = min(1.0, len(support_evidence) * 0.25)
1603          reject_score = min(1.0, len(reject_evidence) * 0.35)
1604  
1605          # Build reason
1606          if support_score > reject_score:
1607              reason = f"Found {len(support_evidence)} supporting insight(s) with keyword/axiom overlap"
1608          elif reject_score > support_score:
1609              reason = f"Found {len(reject_evidence)} potential counterexample(s)"
1610          else:
1611              reason = "Insufficient local evidence to vote"
1612  
1613          return {
1614              "support_score": support_score,
1615              "reject_score": reject_score,
1616              "support_evidence": support_evidence,
1617              "reject_evidence": reject_evidence,
1618              "reason": reason
1619          }
1620  
1621      def _auto_vote_on_principles(self):
1622          """
1623          Automatically vote on principle candidates based on local evidence.
1624  
1625          Called periodically (every 60 seconds) to evaluate and vote on
1626          principles that this FO hasn't voted on yet.
1627          """
1628          if not self.mesh_reachable:
1629              return
1630  
1631          # Fetch current principle candidates
1632          candidates = fetch_principle_candidates()
1633          if not candidates:
1634              return
1635  
1636          my_hostname = socket.gethostname()
1637  
1638          for principle in candidates:
1639              name = principle.get("name", "")
1640              status = principle.get("status", "candidate")
1641              voters = principle.get("voters", [])
1642  
1643              # Skip if already voted or not in candidate status
1644              if my_hostname in voters:
1645                  continue
1646              if status != "candidate":
1647                  continue
1648  
1649              # Skip if we proposed this principle (don't vote on own proposals)
1650              proposed_by = principle.get("proposed_by", "")
1651              if my_hostname in proposed_by:
1652                  continue
1653  
1654              # Evaluate evidence
1655              evaluation = self._evaluate_principle_evidence(principle)
1656  
1657              # Decision thresholds
1658              SUPPORT_THRESHOLD = 0.4  # Need 40% support score to vote support
1659              REJECT_THRESHOLD = 0.5   # Need 50% reject score to vote reject
1660              MIN_EVIDENCE = 1         # Need at least 1 piece of evidence
1661  
1662              total_evidence = len(evaluation["support_evidence"]) + len(evaluation["reject_evidence"])
1663  
1664              if total_evidence < MIN_EVIDENCE:
1665                  # Not enough evidence to vote - skip
1666                  continue
1667  
1668              # Decide vote
1669              if evaluation["support_score"] >= SUPPORT_THRESHOLD and \
1670                 evaluation["support_score"] > evaluation["reject_score"]:
1671                  vote = "support"
1672                  reason = evaluation["reason"]
1673              elif evaluation["reject_score"] >= REJECT_THRESHOLD:
1674                  vote = "reject"
1675                  reason = evaluation["reason"]
1676              else:
1677                  # Scores too close or too low - abstain
1678                  continue
1679  
1680              # Cast vote
1681              if vote_on_principle(name, vote, reason):
1682                  print(f"  🗳️  AUTO-VOTE: {vote} on {name}")
1683                  print(f"      Reason: {reason}")
1684                  print(f"      Evidence: {total_evidence} insight(s) analyzed")
1685                  print()
1686  
1687      # ═══════════════════════════════════════════════════════════════════════════════
1688      # AUTO-WRITE CONFIRMED PRINCIPLES TO CLAUDE.MD
1689      # ═══════════════════════════════════════════════════════════════════════════════
1690  
1691      def _write_confirmed_principles_to_claude_md(self):
1692          """
1693          Check for confirmed principles and write them to CLAUDE.md.
1694  
1695          Confirmed principles are appended as CANDIDATE axioms in the
1696          Key Derived Principles section, marked for human review.
1697          """
1698          if not self.mesh_reachable:
1699              return
1700  
1701          # Fetch confirmed principles that haven't been written yet
1702          confirmed = fetch_confirmed_principles()
1703          if not confirmed:
1704              return
1705  
1706          claude_md_path = self.config.sovereign_os_dir / "CLAUDE.md"
1707          if not claude_md_path.exists():
1708              print(f"  ⚠ CLAUDE.md not found at {claude_md_path}")
1709              return
1710  
1711          # Read current CLAUDE.md
1712          with open(claude_md_path, 'r') as f:
1713              content = f.read()
1714  
1715          # Find the Key Derived Principles section
1716          marker = "## Key Derived Principles"
1717          if marker not in content:
1718              print(f"  ⚠ Could not find '{marker}' section in CLAUDE.md")
1719              return
1720  
1721          # Track what we write
1722          written_principles = []
1723  
1724          for principle in confirmed:
1725              name = principle.get("name", "Unknown")
1726              statement = principle.get("statement", "")
1727              axiom_connections = principle.get("axiom_connections", [])
1728              votes = principle.get("votes", {})
1729              support = votes.get("support", 0)
1730              reject = votes.get("reject", 0)
1731  
1732              # Check if already in CLAUDE.md (by name)
1733              if name in content:
1734                  print(f"  ⚠ {name} already in CLAUDE.md, skipping")
1735                  mark_principle_written(name)
1736                  continue
1737  
1738              # Format the principle entry
1739              connections_str = f" ({', '.join(axiom_connections)})" if axiom_connections else ""
1740              entry = f"- **{name}**{connections_str} - {statement} *MESH-CONFIRMED: +{support}/-{reject} votes* 🧪 CANDIDATE\n"
1741  
1742              # Insert after the marker line
1743              # Find the end of the section header line
1744              marker_pos = content.find(marker)
1745              # Find the next line after the marker
1746              next_newline = content.find("\n", marker_pos)
1747              if next_newline == -1:
1748                  next_newline = len(content)
1749  
1750              # Find the first bullet point after the section header
1751              first_bullet = content.find("\n- ", next_newline)
1752              if first_bullet == -1 or first_bullet > content.find("\n##", next_newline + 1):
1753                  # No bullets in section or section ends before next bullet
1754                  # Insert right after the section header
1755                  insert_pos = next_newline + 1
1756              else:
1757                  # Insert before the first bullet
1758                  insert_pos = first_bullet + 1
1759  
1760              # Insert the new principle
1761              content = content[:insert_pos] + entry + content[insert_pos:]
1762  
1763              written_principles.append(name)
1764              print(f"  ✍️  WRITING TO CLAUDE.MD: {name}")
1765              print(f"      Statement: {statement[:60]}...")
1766              print(f"      Votes: +{support}/-{reject}")
1767              print()
1768  
1769          if written_principles:
1770              # Write back to CLAUDE.md
1771              with open(claude_md_path, 'w') as f:
1772                  f.write(content)
1773  
1774              # Mark all as written in mesh
1775              for name in written_principles:
1776                  mark_principle_written(name)
1777  
1778              print(f"  ✅ Wrote {len(written_principles)} principle(s) to CLAUDE.md")
1779              print()
1780  
    def stop(self):
        """Stop the daemon.

        Clears the run flag (presumably polled by the daemon's main loop —
        confirm in run()). Invoked by the SIGINT/SIGTERM handlers installed
        in main().
        """
        self.running = False
1784  
1785  
1786  # ═══════════════════════════════════════════════════════════════════════════════
1787  # ENTRY POINT
1788  # ═══════════════════════════════════════════════════════════════════════════════
1789  
def main():
    """CLI entry point: parse arguments, build the config, run the daemon."""
    parser = argparse.ArgumentParser(
        description="Local First Officer Daemon - Real-time insight detection"
    )
    parser.add_argument("--nodebox", "-n", default="100.79.197.96",
                        help="Nodebox Tailscale IP (default: 100.79.197.96)")
    parser.add_argument("--port", "-p", type=int, default=7777,
                        help="Nodebox port (default: 7777)")
    parser.add_argument("--interval", "-i", type=float, default=2.0,
                        help="Poll interval in seconds (default: 2.0)")
    parser.add_argument("--sovereign-dir", "-d", type=Path,
                        default=Path.home() / "repos" / "Sovereign_OS",
                        help="Sovereign OS directory")
    opts = parser.parse_args()

    daemon = LocalFirstOfficer(LocalFOConfig(
        nodebox_ip=opts.nodebox,
        nodebox_port=opts.port,
        poll_interval=opts.interval,
        sovereign_os_dir=opts.sovereign_dir
    ))

    # Shut down cleanly on Ctrl-C or a TERM from a service manager.
    def handle_signal(sig, frame):
        daemon.stop()

    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, handle_signal)

    daemon.run()
1837  
1838  
if __name__ == "__main__":
    # Run only when executed as a script, not when imported as a module.
    main()