# scripts/a4_steward.py
  1  #!/usr/bin/env python3
  2  """
  3  A4 Steward - Principle topology maintenance for Ergodic Asymmetry
  4  
  5  This is a "center of excellence" agent for A4. It:
  6  1. Knows the full topology of A4 (positive/negative instances, edges)
  7  2. Detects when A4 is invoked with divergence
  8  3. Propagates edge discoveries to all connections
  9  4. Identifies hidden connections that become visible with new edges
 10  
 11  Usage:
 12      python scripts/a4_steward.py --crawl              # Update topology
 13      python scripts/a4_steward.py --test "statement"   # Test A4 resonance
 14      python scripts/a4_steward.py --edge "description" # Record new edge
 15      python scripts/a4_steward.py --propagate          # Propagate edges to connections
 16      python scripts/a4_steward.py --find-hidden        # Find newly connectable orphans
 17  
 18  Pattern: The Talmud defines the edges of the Torah.
 19  """
 20  
 21  import os
 22  import re
 23  import sys
 24  import json
 25  import argparse
 26  from pathlib import Path
 27  from datetime import datetime
 28  from typing import Dict, List, Set, Tuple, Optional
 29  from collections import Counter
 30  
 31  # A4 semantic field - terms that indicate A4 relevance
 32  A4_STRONG_TERMS = {
 33      'ruin', 'ruinous', 'catastrophe', 'catastrophic', 'terminal',
 34      'irreversible', 'unrecoverable', 'survival', 'survive', 'ergodic',
 35      'non-ergodic', 'nonergodic', 'asymmetry', 'asymmetric', 'compound',
 36      'compounding', 'time-average', 'ensemble-average'
 37  }
 38  
 39  A4_MEDIUM_TERMS = {
 40      'risk', 'risky', 'dangerous', 'cheap', 'expensive', 'cost',
 41      'rebuild', 'rewrite', 'redo', 'reversible', 'undo', 'rollback',
 42      'prevent', 'prevention', 'avoid', 'loss', 'losses'
 43  }
 44  
 45  A4_WEAK_TERMS = {
 46      'careful', 'caution', 'cautious', 'fail', 'failure', 'failing',
 47      'recover', 'recovery', 'backup', 'restore'
 48  }
 49  
 50  # A4 exemplar statements (positive instances)
 51  A4_EXEMPLARS = [
 52      ("prevent ruin before optimizing gain", 1.0),
 53      ("one catastrophic loss undoes accumulated gains", 0.95),
 54      ("time average does not equal ensemble average", 0.90),
 55      ("some positions are terminal", 0.85),
 56      ("rewrites are cheap", 0.80),
 57      ("fail fast fail cheap", 0.80),
 58      ("build liberally", 0.73),
 59      ("survival precedes optimization", 0.85),
 60      ("context loss is terminal", 0.90),
 61      ("rebuilding is not catastrophic", 0.75),
 62  ]
 63  
 64  # A4 counter-examples (negative instances - what A4 is NOT)
 65  A4_COUNTER_EXAMPLES = [
 66      "don't make mistakes",  # A4 isn't about ALL mistakes
 67      "be careful always",     # A4 isn't about avoiding all risk
 68      "optimize expected value", # A4 rejects EV in non-ergodic systems
 69  ]
 70  
 71  # Known edges
 72  A4_EDGES = [
 73      {
 74          "name": "Build vs Deploy Asymmetry",
 75          "description": "Build errors are cheap (rebuild). Deploy errors can be expensive.",
 76          "implication": "A4 applies MORE to deployment decisions than build decisions",
 77          "discovered": "2026-01-15"
 78      },
 79      {
 80          "name": "Reversibility Determines Relevance",
 81          "description": "A4 matters when decisions are irreversible.",
 82          "implication": "Check reversibility before invoking A4",
 83          "discovered": "2026-01-15"
 84      },
 85      {
 86          "name": "Ensemble vs Time Average",
 87          "description": "A4 is specifically about non-ergodic systems.",
 88          "implication": "If system is ergodic, A4 doesn't apply",
 89          "discovered": "2026-01-15"
 90      }
 91  ]
 92  
 93  
 94  def get_base_path() -> Path:
 95      """Get the base path of Sovereign_OS."""
 96      return Path(__file__).parent.parent
 97  
 98  
 99  def normalize_text(text: str) -> str:
100      """Normalize text for comparison."""
101      return re.sub(r'[^a-z0-9\s]', '', text.lower())
102  
103  
def extract_terms(text: str) -> Counter:
    """Count occurrences of each normalized word longer than two characters."""
    counts: Counter = Counter()
    for token in normalize_text(text).split():
        # Skip very short words ("a", "to", "of", ...) as noise.
        if len(token) > 2:
            counts[token] += 1
    return counts
109  
110  
def calculate_a4_resonance(text: str) -> Tuple[float, Dict[str, float], List[str]]:
    """
    Score how strongly *text* resonates with principle A4.

    Three signals are combined:
      1. Weighted hits against the strong/medium/weak A4 vocabularies
         (this component alone is capped at 0.5).
      2. Similarity to known positive exemplars, via shared-term overlap
         (weight * 0.5) or long-term substring containment (weight * 0.3);
         only the best single exemplar contribution counts.
      3. A flat 0.2 penalty when a known counter-example appears verbatim.

    Returns:
        - Overall score clamped to [0, 1]
        - Breakdown by signal type
        - List of exemplar statements that matched
    """
    cleaned = normalize_text(text)
    tokens = set(cleaned.split())

    # Vocabulary hits, weighted by tier and capped.
    strong_hits = tokens & A4_STRONG_TERMS
    medium_hits = tokens & A4_MEDIUM_TERMS
    weak_hits = tokens & A4_WEAK_TERMS

    vocab_score = min(
        0.5,  # term signal can never exceed 0.5 on its own
        len(strong_hits) * 0.15 + len(medium_hits) * 0.08 + len(weak_hits) * 0.03,
    )

    # Exemplar similarity: keep only the best single contribution.
    best_exemplar_score = 0.0
    matched: List[str] = []
    long_tokens = [t for t in tokens if len(t) > 4]

    for statement, weight in A4_EXEMPLARS:
        statement_clean = normalize_text(statement)
        statement_terms = set(statement_clean.split())

        # Term-overlap match: >=2 shared words, or any long input token
        # appearing inside the exemplar text.
        shared = tokens & statement_terms
        if len(shared) >= 2 or any(t in statement_clean for t in long_tokens):
            best_exemplar_score = max(best_exemplar_score, weight * 0.5)
            matched.append(statement)

        # Substring match: a long exemplar word embedded in the input.
        if any(term in cleaned for term in statement_terms if len(term) > 5):
            best_exemplar_score = max(best_exemplar_score, weight * 0.3)
            if statement not in matched:
                matched.append(statement)

    # A verbatim counter-example lowers the score by a flat amount.
    penalty = 0.0
    for counter in A4_COUNTER_EXAMPLES:
        if normalize_text(counter) in cleaned:
            penalty = 0.2
            break

    total = min(1.0, vocab_score + best_exemplar_score - penalty)

    # NOTE: the per-tier breakdown values are uncapped, so they need not
    # sum exactly to the total when the 0.5 vocabulary cap kicked in.
    breakdown = {
        "strong_terms": len(strong_hits) * 0.15,
        "medium_terms": len(medium_hits) * 0.08,
        "weak_terms": len(weak_hits) * 0.03,
        "exemplar_match": best_exemplar_score,
        "counter_penalty": -penalty
    }

    return total, breakdown, matched
175  
176  
def crawl_topology() -> Dict:
    """Crawl the graph for all A4 references and update topology.

    Scans every ``*.md`` file under the configured directories and collects:
      - direct A4 references (the token "A4" or ergodic/ergodicity),
      - semantic matches (resonance score >= 0.3) and high-resonance files
        (score >= 0.5),
      - orphan files (no inbound ``[[wiki-link]]``) that still carry the A4
        semantic field (score >= 0.2).

    Returns a dict of the collected lists, a files_scanned count, and an
    ISO-format crawl timestamp. Unreadable files are silently skipped.
    """
    base_path = get_base_path()
    scan_dirs = ['docs', 'patterns', 'sessions', 'dashboards']

    results = {
        "files_scanned": 0,
        "direct_references": [],
        "semantic_matches": [],
        "high_resonance": [],
        "orphans_with_a4_terms": [],
        "crawl_time": datetime.now().isoformat()
    }

    # Track inbound links for orphan detection:
    # inbound_links maps link-target key -> set of file keys that link to it.
    inbound_links = {}
    all_files = {}

    for scan_dir in scan_dirs:
        dir_path = base_path / scan_dir
        if not dir_path.exists():
            continue

        for filepath in dir_path.rglob('*.md'):
            if '.git' in filepath.parts:
                continue

            try:
                content = filepath.read_text(encoding='utf-8')
            except Exception:
                continue  # unreadable file: skip it rather than abort the crawl

            results["files_scanned"] += 1
            rel_path = str(filepath.relative_to(base_path))

            # Store for orphan detection, keyed the way wiki-links are written
            # (lowercase, spaces replaced by hyphens).
            file_key = filepath.stem.lower().replace(' ', '-')
            all_files[file_key] = rel_path

            # Extract outbound [[wiki-links]] — target part only (text before
            # any "|" alias separator or closing bracket).
            links = re.findall(r'\[\[([^\]|]+)', content)
            for link in links:
                link_key = link.lower().replace(' ', '-')
                if link_key not in inbound_links:
                    inbound_links[link_key] = set()
                inbound_links[link_key].add(file_key)

            # Check for direct A4 references
            if re.search(r'\bA4\b|ergodic|ergodicity', content, re.IGNORECASE):
                results["direct_references"].append({
                    "file": rel_path,
                    "type": "direct"
                })

            # Calculate semantic resonance
            score, breakdown, exemplars = calculate_a4_resonance(content)

            if score >= 0.3:
                results["semantic_matches"].append({
                    "file": rel_path,
                    "score": score,
                    "exemplars": exemplars
                })

            if score >= 0.5:
                results["high_resonance"].append({
                    "file": rel_path,
                    "score": score
                })

    # Find orphans with A4 terms: files that nothing links to, but whose
    # content still scores against the A4 semantic field (lower threshold
    # than semantic_matches, to surface weak candidates).
    orphans = set(all_files.keys()) - set(inbound_links.keys())
    for orphan in orphans:
        if orphan in all_files:
            filepath = base_path / all_files[orphan]
            if filepath.exists():
                try:
                    content = filepath.read_text(encoding='utf-8')
                    score, _, exemplars = calculate_a4_resonance(content)
                    if score >= 0.2:  # Low threshold for orphans
                        results["orphans_with_a4_terms"].append({
                            "file": all_files[orphan],
                            "score": score,
                            "exemplars": exemplars
                        })
                except Exception:
                    pass  # best-effort: ignore unreadable orphan files

    return results
266  
267  
def get_divergence_zone(score: float) -> Tuple[str, str, str]:
    """
    Map a resonance score to its divergence zone.

    Divergence is D = 1 - score, so high resonance means low divergence.

    Returns (zone_name, action, handler).
    """
    divergence = 1.0 - score

    # (exclusive upper bound on D, zone, action, handler) — first match wins.
    zone_table = (
        (0.15, "CORE", "Proceed normally", "Auto"),
        (0.30, "CONFIRMED", "Log positive instance to topology", "Auto"),
        (0.50, "EDGE ZONE", "Steward activates: discover edge, propagate", "Principle Steward"),
        (0.70, "TENSION", "Escalate: principle may need adjustment", "Human"),
    )
    for upper_bound, zone, action, handler in zone_table:
        if divergence < upper_bound:
            return (zone, action, handler)

    # D >= 0.70: fundamentally divergent — requires a human decision.
    return ("DIVERGENT", "Three options: adjust principle, spawn candidate, or reject", "Human")
290  
291  
def test_statement(text: str) -> None:
    """Test a statement for A4 resonance and print a full explanation.

    Prints: the score as a bar chart, the divergence zone (with action and
    handler), the per-signal score breakdown, matched exemplars, a keyword
    heuristic for reversibility, and a zone-specific recommendation.
    """
    score, breakdown, exemplars = calculate_a4_resonance(text)

    print(f"\n{'='*60}")
    print(f"A4 STEWARD - Resonance Analysis")
    print(f"{'='*60}\n")

    print(f"Statement: \"{text}\"\n")

    # Score visualization: 40-char filled/empty bar
    bar_length = 40
    filled = int(score * bar_length)
    bar = "█" * filled + "░" * (bar_length - filled)
    print(f"A4 Resonance: [{bar}] {score:.0%}\n")

    # Divergence zone (D = 1 - resonance)
    zone, action, handler = get_divergence_zone(score)
    divergence = 1.0 - score
    print(f"Divergence: D = {divergence:.2f}")
    print(f"Zone: {zone}")
    print(f"Action: {action}")
    print(f"Handler: {handler}\n")

    # Breakdown: only non-zero components are shown, signed
    print("Score Breakdown:")
    for key, value in breakdown.items():
        if value != 0:
            sign = "+" if value > 0 else ""
            print(f"  {key}: {sign}{value:.2f}")

    # Matched exemplars
    if exemplars:
        print(f"\nMatched Exemplars:")
        for ex in exemplars:
            print(f"  - \"{ex}\"")

    # Edge analysis
    print(f"\nEdge Analysis:")

    # Check reversibility edge via keyword heuristic; "build"-type words win
    # over "deploy"-type words when both are present.
    if any(term in text.lower() for term in ['build', 'rewrite', 'rebuild', 'cheap']):
        print("  - Reversibility: HIGH (cheap to redo = low A4 concern)")
    elif any(term in text.lower() for term in ['deploy', 'production', 'irreversible']):
        print("  - Reversibility: LOW (hard to undo = high A4 concern)")
    else:
        print("  - Reversibility: UNKNOWN (check if decision is reversible)")

    # Zone-specific recommendations
    print(f"\n{'='*60}")
    if zone == "CORE":
        print("STATUS: Strong A4 alignment - proceed normally")
    elif zone == "CONFIRMED":
        print("STATUS: Good A4 fit - logging as positive instance")
    elif zone == "EDGE ZONE":
        print("STATUS: STEWARD ACTIVATING - edge discovery triggered")
        print("  → Document this edge in A4 topology")
        print("  → Propagate to A4 connections")
        print("  → Check for newly visible orphan connections")
    elif zone == "TENSION":
        print("STATUS: High divergence - escalating to human")
        print("  → May need to adjust A4 principle")
        print("  → Or reject this idea as A4-violating")
    else:  # DIVERGENT
        print("STATUS: Major divergence - three options:")
        print("  1. ADJUST: A4 principle is incomplete")
        print("  2. SPAWN: This is a new candidate principle")
        print("  3. REJECT: This idea violates A4 for bad reasons")
    print(f"{'='*60}\n")
361  
362  
def record_edge(description: str, source: str = "manual") -> None:
    """
    Record a new edge discovery for A4.

    This triggers the credit attribution flow:
    1. Record the edge and its source in the topology file
    2. Create a resonance alert so propagation can be tracked
    3. Measure ΔF reduction across system (after --propagate)
    4. Attribute credit back to source

    Args:
        description: Free-text edge description; text before the first "."
            becomes the edge title in the topology entry.
        source: Where the insight came from (defaults to "manual").
    """
    topology_path = get_base_path() / "sessions/principle-topology/A4-ergodic-asymmetry.md"

    if not topology_path.exists():
        print(f"Error: Topology file not found at {topology_path}")
        return

    content = topology_path.read_text(encoding='utf-8')

    # Create edge entry with credit tracking. Capture the clock ONCE so the
    # topology entry and the alert share the same Credit ID (previously the
    # timestamp was computed twice and could differ across a second boundary).
    edge_num = len(A4_EDGES) + 1
    now = datetime.now()
    today = now.strftime("%Y-%m-%d")
    timestamp = now.strftime("%Y%m%d-%H%M%S")
    credit_id = f"edge-{timestamp}"

    edge_entry = f"""
### Edge {edge_num}: {description.split('.')[0]}
- **Discovery:** {description}
- **Implication:** [Needs analysis]
- **Source:** {source}
- **Discovered:** {today}
- **Credit ID:** {credit_id}
"""

    # Find the edges section and insert the new entry just before the next
    # "## " heading (or append at end of file if the section is last).
    edges_marker = "## Edges (Boundaries Discovered)"
    if edges_marker in content:
        # maxsplit=1 is essential: splitting on every occurrence and then
        # rejoining only the first two parts would silently drop the rest of
        # the file if the marker text appeared more than once.
        head, tail = content.split(edges_marker, 1)
        next_section = re.search(r'\n## ', tail)
        if next_section:
            insert_pos = next_section.start()
            new_content = (
                head + edges_marker +
                tail[:insert_pos] + edge_entry +
                tail[insert_pos:]
            )
        else:
            new_content = content + edge_entry

        topology_path.write_text(new_content, encoding='utf-8')
        print(f"\n✓ Edge recorded in topology: {description[:50]}...")
        print(f"  File: {topology_path}")
    else:
        print("Could not find edges section in topology file")

    # Also create a resonance alert (written even if the topology file had
    # no edges section, so the discovery is never lost).
    alerts_dir = get_base_path() / "sessions/RESONANCE-ALERTS"
    alerts_dir.mkdir(exist_ok=True)

    alert_path = alerts_dir / f"{timestamp}-edge-discovery-a4.md"

    alert_content = f"""# A4 Edge Discovery - {today}

**Type:** EDGE_DISCOVERY
**Principle:** A4 (Ergodic Asymmetry)
**Detected:** {datetime.now().isoformat()}
**Credit ID:** {credit_id}
**Source:** {source}

---

## New Edge

> {description}

## Credit Attribution

This edge discovery will propagate and reduce free energy across the system.
Credit flows back to the source insight.

```
SOURCE: {source}
TRIGGER: Edge discovery in A4
EDGE: "{description[:50]}..."
PROPAGATION: [Pending - run --propagate]
TOTAL CREDIT: ΔF = [Calculate after propagation]
```

## Propagation Required

This edge should be propagated to:
1. All direct A4 references (28 files)
2. High-resonance files (>50%)
3. Orphans that might now connect

## Action Required

Run: `python scripts/a4_steward.py --propagate`

After propagation, update credit ledger with ΔF values.

---

*Auto-generated by A4 Steward | {today}*
"""

    alert_path.write_text(alert_content, encoding='utf-8')
    print(f"✓ Alert created: {alert_path}")
475  
476  
def propagate_edges() -> None:
    """Report topology stats and list the known A4 edges to propagate.

    Crawls the graph, prints summary counts, lists high-resonance files
    (best first) that should be aware of the current edges, then prints
    each known edge with its implication for human review.
    """
    rule = "=" * 60
    thin = "-" * 60

    print("\n" + rule)
    print("A4 STEWARD - Edge Propagation")
    print(rule + "\n")

    # Crawl current topology for up-to-date counts.
    topology = crawl_topology()

    print(f"Files scanned: {topology['files_scanned']}")
    print(f"Direct references: {len(topology['direct_references'])}")
    print(f"Semantic matches: {len(topology['semantic_matches'])}")
    print(f"High resonance (>50%): {len(topology['high_resonance'])}")
    print(f"Orphans with A4 terms: {len(topology['orphans_with_a4_terms'])}")

    print("\n" + thin)
    print("Files that should understand current A4 edges:")
    print(thin + "\n")

    # Highest resonance first.
    ranked = sorted(topology['high_resonance'], key=lambda m: m['score'], reverse=True)
    for match in ranked:
        print(f"  [{match['score'] * 100:5.1f}%] {match['file']}")

    print("\n" + thin)
    print("Edges to propagate:")
    print(thin + "\n")

    for edge in A4_EDGES:
        print(f"  • {edge['name']}")
        print(f"    {edge['description']}")
        print(f"    → {edge['implication']}")
        print()

    print(rule)
    print("SUGGESTION: Review high-resonance files for edge awareness")
    print("Pattern: The Talmud defines the edges of the Torah")
    print(rule + "\n")
515  
516  
def find_hidden_connections() -> None:
    """List orphan files whose text carries the A4 semantic field.

    These are candidates for new links to A4 (or principles derived from
    it) now that additional edges are known.
    """
    rule = "=" * 60
    thin = "-" * 60

    print("\n" + rule)
    print("A4 STEWARD - Hidden Connection Discovery")
    print(rule + "\n")

    results = crawl_topology()
    orphans = results['orphans_with_a4_terms']

    if not orphans:
        print("No orphans found with A4 semantic field.")
        print("Either the graph is well-connected or A4 concepts are absent.")
        return

    print(f"Found {len(orphans)} orphans with A4 semantic field:\n")

    # Highest score first; show at most two matched exemplars per orphan.
    for entry in sorted(orphans, key=lambda o: o['score'], reverse=True):
        print(f"  [{entry['score'] * 100:5.1f}%] {entry['file']}")
        for exemplar in (entry.get('exemplars') or [])[:2]:
            print(f"          Matches: \"{exemplar}\"")
        print()

    print(thin)
    print("SUGGESTION: These orphans might connect to A4 or its derived principles.")
    print("Use: python scripts/resonance_engine.py <orphan_file> --apply")
    print(thin + "\n")
546  
547  
def generate_credit_graph() -> None:
    """Generate a Mermaid visualization of edge-as-concept credit flow.

    Prints the (static) diagram plus a legend to stdout and also writes it
    to sessions/principle-topology/A4-credit-graph.md.
    """
    print("\n" + "="*60)
    print("A4 STEWARD - Credit Flow Visualization")
    print("="*60 + "\n")

    # Static diagram: insights -> edges (as concepts) -> principle -> files.
    mermaid = '''```mermaid
graph LR
    subgraph "Source Insights"
        I1["Deploy errors cost more<br/>than build errors"]
        I2["Context loss is terminal"]
        I3["Rewrites are cheap"]
    end

    subgraph "Edges as Concepts"
        E1[["Edge: Build vs Deploy<br/>Asymmetry"]]
        E2[["Edge: Reversibility<br/>Determines Relevance"]]
        E3[["Edge: Ensemble vs<br/>Time Average"]]
    end

    subgraph "Principle"
        A4(("A4: Ergodic<br/>Asymmetry"))
    end

    subgraph "Downstream Clarity"
        D1["tribal-build-protocol"]
        D2["phoenix-extraction"]
        D3["building-as-cognition"]
    end

    I1 -->|discovered| E1
    I2 -->|discovered| E2
    I3 -->|discovered| E3

    E1 -->|defines edge of| A4
    E2 -->|defines edge of| A4
    E3 -->|defines edge of| A4

    E1 -->|clarifies| D1
    E2 -->|clarifies| D2
    E1 -->|clarifies| D3
    E3 -->|clarifies| D3

    style E1 fill:#ffeb3b,stroke:#f57f17
    style E2 fill:#ffeb3b,stroke:#f57f17
    style E3 fill:#ffeb3b,stroke:#f57f17
    style A4 fill:#4caf50,stroke:#2e7d32
```'''

    print("Credit Flow Graph (Mermaid):\n")
    print(mermaid)

    print("\n" + "-"*60)
    print("KEY:")
    print("  Source Insights → discovered → Edges (as concepts)")
    print("  Edges → defines edge of → Principle")
    print("  Edges → clarifies → Downstream files")
    print("-"*60)
    print("\nEdges are CONCEPTS THEMSELVES that bridge:")
    print("  - Disparate insights (anywhere in graph)")
    print("  - Principle boundaries")
    print("  - Downstream clarity")
    print("\nCredit flows: Source → Edge → Principle → Downstream ΔF")
    print("="*60 + "\n")

    # Save to file (overwrites any previous graph)
    output_path = get_base_path() / "sessions/principle-topology/A4-credit-graph.md"
    output_path.write_text(f"# A4 Credit Flow Graph\n\n{mermaid}\n", encoding='utf-8')
    print(f"Saved to: {output_path}")
617  
618  
def main():
    """Parse CLI flags and dispatch to the selected steward action.

    Returns 0 after running an action, 1 when no action flag was given
    (after printing help). Only the first matching flag is executed.
    """
    parser = argparse.ArgumentParser(
        description='A4 Steward - Principle topology maintenance for Ergodic Asymmetry',
        epilog='Pattern: The Talmud defines the edges of the Torah'
    )

    parser.add_argument('--crawl', action='store_true',
                        help='Crawl the graph and update A4 topology')
    parser.add_argument('--test', type=str, metavar='TEXT',
                        help='Test a statement for A4 resonance')
    parser.add_argument('--edge', type=str, metavar='DESC',
                        help='Record a new edge discovery')
    parser.add_argument('--propagate', action='store_true',
                        help='Propagate edges to all A4 connections')
    parser.add_argument('--find-hidden', action='store_true',
                        help='Find orphans that might now connect with current edges')
    parser.add_argument('--credit-graph', action='store_true',
                        help='Generate Mermaid visualization of credit flow')
    parser.add_argument('--json', action='store_true',
                        help='Output in JSON format (for crawl)')

    args = parser.parse_args()

    if args.crawl:
        results = crawl_topology()
        if args.json:
            print(json.dumps(results, indent=2))
        else:
            # Human-readable crawl summary
            print(f"\nA4 Topology Crawl Complete")
            print(f"  Files scanned: {results['files_scanned']}")
            print(f"  Direct references: {len(results['direct_references'])}")
            print(f"  Semantic matches: {len(results['semantic_matches'])}")
            print(f"  High resonance: {len(results['high_resonance'])}")
            print(f"  Orphans with A4 terms: {len(results['orphans_with_a4_terms'])}")
        return 0

    if args.test:
        test_statement(args.test)
        return 0
    if args.edge:
        record_edge(args.edge)
        return 0
    if args.propagate:
        propagate_edges()
        return 0
    if args.find_hidden:
        find_hidden_connections()
        return 0
    if args.credit_graph:
        generate_credit_graph()
        return 0

    # No action requested: show usage and signal failure to the shell.
    parser.print_help()
    return 1
691  
692  
# Script entry point: propagate main()'s return code (0 = action ran,
# 1 = no action requested) as the process exit status.
if __name__ == '__main__':
    sys.exit(main())