# close_down_report.py
#!/usr/bin/env python3
"""
Close Down Report Schema - Full Session Accounting

This module defines the schema for end-of-session accounting.
Every session (real or speed run) produces a Close Down Report
that provides full transparency on:
- Work done and value created
- Free energy changes (alignment)
- Cost vs value analysis
- Protocol compliance
- Thread accounting (for hierarchical runs)
- Recommendations for continuation

Usage:
    from core.metacog.close_down_report import CloseDownReport, generate_report

    report = CloseDownReport(session_id="2026-01-15-building-steward")
    report.add_artifact(...)
    report.record_free_energy(...)

    # Generate markdown
    markdown = report.to_markdown()

    # Save to file
    report.save()
"""

from dataclasses import dataclass, field
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
import json

# NOTE(review): the original imported `typing.Tuple` and third-party `yaml`,
# but neither name is referenced anywhere in this module; both were removed
# (dropping `yaml` also removes an unnecessary PyYAML install requirement).


# =============================================================================
# ENUMS
# =============================================================================

class ArtifactType(Enum):
    """Types of artifacts that can be created in a session."""
    CODE = "code"
    PATTERN = "pattern"
    PRINCIPLE = "principle"
    SCRIPT = "script"
    DOCUMENTATION = "documentation"
    SCHEMA = "schema"
    PROTOCOL = "protocol"
    TOPOLOGY = "topology"
    SYNTHESIS = "synthesis"
    FIX = "fix"


class ThreadStatus(Enum):
    """Status of a thread in hierarchical execution."""
    ACTIVE = "active"
    COLLAPSED = "collapsed"   # Confidence decayed, returned to main
    COMPLETED = "completed"
    BLOCKED = "blocked"       # Waiting for escalation


class ComplianceStatus(Enum):
    """Protocol compliance status."""
    PASSED = "passed"
    WARNING = "warning"
    FAILED = "failed"
    SKIPPED = "skipped"


# =============================================================================
# DATA STRUCTURES
# =============================================================================

@dataclass
class Artifact:
    """A work artifact produced during the session."""
    artifact_id: str
    name: str
    artifact_type: ArtifactType
    path: Optional[str] = None

    # Value scoring
    v_estimate: float = 0.0   # Pre-work value estimate
    v_actual: float = 0.0     # Post-work actual value

    # Cost tracking
    cost_tokens: int = 0            # Approximate tokens spent
    cost_complexity: float = 0.0    # Subjective complexity (0-1)

    # Metadata
    created_at: datetime = field(default_factory=datetime.now)
    description: str = ""
    axiom_alignment: Dict[str, float] = field(default_factory=dict)  # A0-A4 scores

    @property
    def net_value(self) -> float:
        """Net value = actual value - normalized cost.

        Cost blends subjective complexity and token spend 50/50, with
        10k tokens treated as a full cost unit; the blend is capped at 1.0
        so a single artifact can never be penalized more than one unit.
        """
        normalized_cost = self.cost_complexity * 0.5 + (self.cost_tokens / 10000) * 0.5
        return self.v_actual - min(normalized_cost, 1.0)

    @property
    def estimation_error(self) -> float:
        """How far off was our estimate? (absolute difference)"""
        return abs(self.v_estimate - self.v_actual)


@dataclass
class FreeEnergyRecord:
    """Free energy measurement at a point in time."""
    timestamp: datetime
    category: str  # e.g., "protocol", "alignment", "hygiene"
    f_value: float
    axiom_deviations: Dict[str, float] = field(default_factory=dict)  # A0-A4
    notes: str = ""


@dataclass
class ThreadRecord:
    """Record of a thread in hierarchical execution."""
    thread_id: str
    model: str  # "opus", "sonnet", "haiku"
    parent_thread: Optional[str] = None

    # Execution tracking
    prompt_count: int = 0
    started_at: datetime = field(default_factory=datetime.now)
    ended_at: Optional[datetime] = None

    # Confidence tracking
    confidence_start: float = 1.0
    confidence_end: float = 1.0
    confidence_min: float = 1.0
    collapse_threshold: float = 0.4

    # Status
    status: ThreadStatus = ThreadStatus.ACTIVE
    collapse_reason: str = ""

    # Output
    yield_summary: str = ""   # What usable work came from this thread
    yield_score: float = 0.0  # 0-1, how much was usable

    @property
    def duration(self) -> Optional[timedelta]:
        """Wall-clock duration, or None while the thread is still running."""
        if self.ended_at:
            return self.ended_at - self.started_at
        return None

    @property
    def collapsed(self) -> bool:
        """True when the thread ended via confidence-decay collapse."""
        return self.status == ThreadStatus.COLLAPSED


@dataclass
class ComplianceCheck:
    """Record of a protocol compliance check."""
    check_name: str
    status: ComplianceStatus
    timestamp: datetime = field(default_factory=datetime.now)
    details: str = ""
    auto_fixed: bool = False  # Did we automatically fix the issue?


@dataclass
class Decision:
    """A decision made during the session."""
    decision_id: str
    description: str
    confidence: float
    timestamp: datetime = field(default_factory=datetime.now)

    # Analysis
    options_considered: List[str] = field(default_factory=list)
    chosen_option: str = ""
    reasoning: str = ""

    # Retrospective
    should_have_escalated: Optional[bool] = None
    outcome_notes: str = ""

    # Axiom alignment
    axiom_relevance: Dict[str, float] = field(default_factory=dict)


@dataclass
class CreditAttribution:
    """Credit for value created by an insight or edge discovery."""
    credit_id: str
    source: str  # What insight/statement triggered this

    # Propagation
    edges_discovered: List[str] = field(default_factory=list)
    connections_clarified: int = 0
    orphans_connected: int = 0

    # Value
    total_delta_f: float = 0.0  # Total free energy reduction
    downstream_artifacts: List[str] = field(default_factory=list)


@dataclass
class Recommendation:
    """Recommendation for future sessions."""
    priority: int  # 1 = highest
    category: str  # "continue", "investigate", "fix", "escalate"
    description: str
    context: str = ""
    related_artifacts: List[str] = field(default_factory=list)


# =============================================================================
# MAIN REPORT CLASS
# =============================================================================

@dataclass
class CloseDownReport:
    """
    Full accounting for a session.

    This is the "speed run scorecard" - everything that happened,
    measured and accounted for.
    """

    # === Session Metadata ===
    session_id: str
    session_type: str = "real"  # "real" or "speed_run"
    started_at: datetime = field(default_factory=datetime.now)
    ended_at: Optional[datetime] = None

    # Models used
    primary_model: str = "opus"
    models_used: Set[str] = field(default_factory=lambda: {"opus"})

    # Context tracking
    context_compressions: int = 0
    total_tokens_used: int = 0

    # === Work Inventory ===
    artifacts: List[Artifact] = field(default_factory=list)
    files_modified: List[str] = field(default_factory=list)
    files_created: List[str] = field(default_factory=list)

    # === Free Energy Tracking ===
    f_start: float = 0.0
    f_end: float = 0.0
    f_records: List[FreeEnergyRecord] = field(default_factory=list)

    # === Thread Accounting ===
    threads: List[ThreadRecord] = field(default_factory=list)
    main_thread_id: str = "main"

    # === Protocol Compliance ===
    compliance_checks: List[ComplianceCheck] = field(default_factory=list)
    phoenix_checkpoints: int = 0
    escalations_made: int = 0
    escalations_needed: int = 0

    # === Decisions ===
    decisions: List[Decision] = field(default_factory=list)

    # === Credit Attribution ===
    credits: List[CreditAttribution] = field(default_factory=list)

    # === Recommendations ===
    recommendations: List[Recommendation] = field(default_factory=list)

    # === Gravity Wells ===
    gravity_wells: Dict[str, float] = field(default_factory=dict)

    # === Notes ===
    session_summary: str = ""
    key_insights: List[str] = field(default_factory=list)
    paths_not_taken: List[str] = field(default_factory=list)

    # =========================================================================
    # COMPUTED PROPERTIES
    # =========================================================================

    @property
    def duration(self) -> timedelta:
        """Elapsed session time.

        Fix: the original annotation was Optional[timedelta], but the
        method never returns None - an unfinished session measures up to
        datetime.now().
        """
        if self.ended_at:
            return self.ended_at - self.started_at
        return datetime.now() - self.started_at

    @property
    def delta_f(self) -> float:
        """Total free energy change (negative = improvement)."""
        return self.f_end - self.f_start

    @property
    def total_value_created(self) -> float:
        """Sum of net value from all artifacts."""
        return sum(a.net_value for a in self.artifacts)

    @property
    def total_v_estimate(self) -> float:
        """Sum of estimated value."""
        return sum(a.v_estimate for a in self.artifacts)

    @property
    def total_v_actual(self) -> float:
        """Sum of actual value."""
        return sum(a.v_actual for a in self.artifacts)

    @property
    def estimation_accuracy(self) -> float:
        """How accurate were our value estimates? (0-1, 1=perfect)

        With no artifacts we claim perfect accuracy (vacuously true).
        """
        if not self.artifacts:
            return 1.0
        avg_error = sum(a.estimation_error for a in self.artifacts) / len(self.artifacts)
        # max(0.0, ...) rather than max(0, ...) so the return type is always float
        return max(0.0, 1 - avg_error)

    @property
    def compliance_score(self) -> float:
        """Overall protocol compliance (0-1); only PASSED counts as success."""
        if not self.compliance_checks:
            return 1.0
        passed = sum(1 for c in self.compliance_checks if c.status == ComplianceStatus.PASSED)
        return passed / len(self.compliance_checks)

    @property
    def thread_yield(self) -> float:
        """Average yield from all threads (1.0 when no threads were used)."""
        if not self.threads:
            return 1.0
        return sum(t.yield_score for t in self.threads) / len(self.threads)

    @property
    def collapsed_thread_count(self) -> int:
        """How many threads collapsed due to confidence decay."""
        return sum(1 for t in self.threads if t.collapsed)

    # =========================================================================
    # BUILDER METHODS
    # =========================================================================

    def add_artifact(
        self,
        name: str,
        artifact_type: ArtifactType,
        path: Optional[str] = None,
        v_estimate: float = 0.5,
        description: str = ""
    ) -> Artifact:
        """Add an artifact to the work inventory and return it.

        IDs are sequential ("art-000", "art-001", ...), derived from the
        current list length - stable because artifacts are append-only.
        """
        artifact = Artifact(
            artifact_id=f"art-{len(self.artifacts):03d}",
            name=name,
            artifact_type=artifact_type,
            path=path,
            v_estimate=v_estimate,
            description=description
        )
        self.artifacts.append(artifact)
        return artifact

    def record_free_energy(
        self,
        category: str,
        f_value: float,
        axiom_deviations: Optional[Dict[str, float]] = None,
        notes: str = ""
    ) -> None:
        """Record a free energy measurement; also updates the running f_end."""
        record = FreeEnergyRecord(
            timestamp=datetime.now(),
            category=category,
            f_value=f_value,
            axiom_deviations=axiom_deviations or {},
            notes=notes
        )
        self.f_records.append(record)
        self.f_end = f_value  # Update current F

    def add_thread(
        self,
        thread_id: str,
        model: str = "sonnet",
        parent_thread: Optional[str] = None,
        confidence_start: float = 0.8
    ) -> ThreadRecord:
        """Add a thread to tracking; defaults its parent to the main thread."""
        thread = ThreadRecord(
            thread_id=thread_id,
            model=model,
            parent_thread=parent_thread or self.main_thread_id,
            confidence_start=confidence_start,
            confidence_end=confidence_start,
            confidence_min=confidence_start
        )
        self.threads.append(thread)
        self.models_used.add(model)
        return thread

    def collapse_thread(
        self,
        thread_id: str,
        reason: str,
        yield_summary: str,
        yield_score: float
    ) -> None:
        """Mark a thread as collapsed.

        Unknown thread_ids are silently ignored (no matching record to update).
        """
        for thread in self.threads:
            if thread.thread_id == thread_id:
                thread.status = ThreadStatus.COLLAPSED
                thread.ended_at = datetime.now()
                thread.collapse_reason = reason
                thread.yield_summary = yield_summary
                thread.yield_score = yield_score
                break

    def add_compliance_check(
        self,
        check_name: str,
        status: ComplianceStatus,
        details: str = "",
        auto_fixed: bool = False
    ) -> None:
        """Record a compliance check."""
        check = ComplianceCheck(
            check_name=check_name,
            status=status,
            details=details,
            auto_fixed=auto_fixed
        )
        self.compliance_checks.append(check)

    def add_decision(
        self,
        description: str,
        confidence: float,
        options: List[str],
        chosen: str,
        reasoning: str = ""
    ) -> Decision:
        """Record a decision and return it (IDs are sequential like artifacts)."""
        decision = Decision(
            decision_id=f"dec-{len(self.decisions):03d}",
            description=description,
            confidence=confidence,
            options_considered=options,
            chosen_option=chosen,
            reasoning=reasoning
        )
        self.decisions.append(decision)
        return decision

    def add_recommendation(
        self,
        description: str,
        priority: int = 2,
        category: str = "continue"
    ) -> None:
        """Add a recommendation for next session; list stays sorted by priority."""
        rec = Recommendation(
            priority=priority,
            category=category,
            description=description
        )
        self.recommendations.append(rec)
        self.recommendations.sort(key=lambda r: r.priority)

    def finalize(self) -> None:
        """Finalize the report (call at session end)."""
        self.ended_at = datetime.now()

        # Update artifact actual values if not set (0.0 is the "unset" sentinel)
        for artifact in self.artifacts:
            if artifact.v_actual == 0:
                artifact.v_actual = artifact.v_estimate  # Assume estimate was correct

    # =========================================================================
    # OUTPUT METHODS
    # =========================================================================

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            'session_id': self.session_id,
            'session_type': self.session_type,
            'started_at': self.started_at.isoformat(),
            'ended_at': self.ended_at.isoformat() if self.ended_at else None,
            # Fix: `duration` is never None, and the old truthiness test
            # (`if self.duration`) wrongly produced None for a zero-length
            # session because timedelta(0) is falsy.
            'duration_minutes': self.duration.total_seconds() / 60,
            'primary_model': self.primary_model,
            'models_used': list(self.models_used),
            'context_compressions': self.context_compressions,

            # Summary metrics
            'metrics': {
                'f_start': self.f_start,
                'f_end': self.f_end,
                'delta_f': self.delta_f,
                'total_value_created': self.total_value_created,
                'total_v_estimate': self.total_v_estimate,
                'total_v_actual': self.total_v_actual,
                'estimation_accuracy': self.estimation_accuracy,
                'compliance_score': self.compliance_score,
                'thread_yield': self.thread_yield,
                'artifact_count': len(self.artifacts),
                'thread_count': len(self.threads),
                'collapsed_threads': self.collapsed_thread_count,
                'phoenix_checkpoints': self.phoenix_checkpoints,
            },

            # Detailed data
            'artifacts': [
                {
                    'id': a.artifact_id,
                    'name': a.name,
                    'type': a.artifact_type.value,
                    'path': a.path,
                    'v_estimate': a.v_estimate,
                    'v_actual': a.v_actual,
                    'net_value': a.net_value,
                    'description': a.description,
                }
                for a in self.artifacts
            ],

            'threads': [
                {
                    'id': t.thread_id,
                    'model': t.model,
                    'status': t.status.value,
                    'confidence_range': f"{t.confidence_start:.2f}-{t.confidence_end:.2f}",
                    'yield_score': t.yield_score,
                    'collapsed': t.collapsed,
                }
                for t in self.threads
            ],

            'compliance': [
                {
                    'check': c.check_name,
                    'status': c.status.value,
                    'details': c.details,
                }
                for c in self.compliance_checks
            ],

            'decisions': [
                {
                    'id': d.decision_id,
                    'description': d.description,
                    'confidence': d.confidence,
                    'chosen': d.chosen_option,
                }
                for d in self.decisions
            ],

            'recommendations': [
                {
                    'priority': r.priority,
                    'category': r.category,
                    'description': r.description,
                }
                for r in self.recommendations
            ],

            'key_insights': self.key_insights,
            'paths_not_taken': self.paths_not_taken,
            'session_summary': self.session_summary,
        }

    def to_markdown(self) -> str:
        """Generate markdown report."""
        lines = []

        # Header
        lines.append(f"# Close Down Report: {self.session_id}")
        lines.append("")
        lines.append(f"*{self.session_type.upper()} | {self.started_at.strftime('%Y-%m-%d %H:%M')} | "
                     f"Duration: {self.duration.total_seconds() / 60:.1f} min*")
        lines.append("")

        # Summary Box
        lines.append("---")
        lines.append("")
        lines.append("## Executive Summary")
        lines.append("")
        lines.append(f"| Metric | Value |")
        lines.append(f"|--------|-------|")
        lines.append(f"| **Free Energy Change** | ΔF = {self.delta_f:+.3f} ({'improved' if self.delta_f < 0 else 'degraded'}) |")
        lines.append(f"| **Value Created** | {self.total_value_created:.2f} |")
        lines.append(f"| **Estimation Accuracy** | {self.estimation_accuracy:.0%} |")
        lines.append(f"| **Compliance Score** | {self.compliance_score:.0%} |")
        lines.append(f"| **Artifacts Shipped** | {len(self.artifacts)} |")
        lines.append(f"| **Threads Used** | {len(self.threads)} ({self.collapsed_thread_count} collapsed) |")
        lines.append(f"| **Phoenix Checkpoints** | {self.phoenix_checkpoints} |")
        lines.append("")

        # Session Summary
        if self.session_summary:
            lines.append(f"> {self.session_summary}")
            lines.append("")

        # Work Inventory
        lines.append("---")
        lines.append("")
        lines.append("## Work Inventory")
        lines.append("")
        if self.artifacts:
            lines.append("| Artifact | Type | V Est | V Act | Net | Path |")
            lines.append("|----------|------|-------|-------|-----|------|")
            for a in self.artifacts:
                lines.append(f"| {a.name} | {a.artifact_type.value} | {a.v_estimate:.2f} | "
                             f"{a.v_actual:.2f} | {a.net_value:+.2f} | `{a.path or 'N/A'}` |")
            lines.append("")
        else:
            lines.append("*No artifacts recorded*")
            lines.append("")

        # Free Energy Accounting
        lines.append("---")
        lines.append("")
        lines.append("## Free Energy Accounting")
        lines.append("")
        lines.append(f"| Checkpoint | F Value | Category | Notes |")
        lines.append(f"|------------|---------|----------|-------|")
        lines.append(f"| START | {self.f_start:.3f} | baseline | Session start |")
        for rec in self.f_records:
            lines.append(f"| {rec.timestamp.strftime('%H:%M')} | {rec.f_value:.3f} | "
                         f"{rec.category} | {rec.notes[:40] or '-'} |")
        lines.append(f"| END | {self.f_end:.3f} | final | **ΔF = {self.delta_f:+.3f}** |")
        lines.append("")

        # Thread Accounting
        if self.threads:
            lines.append("---")
            lines.append("")
            lines.append("## Thread Accounting")
            lines.append("")
            lines.append("| Thread | Model | Prompts | Confidence | Status | Yield |")
            lines.append("|--------|-------|---------|------------|--------|-------|")
            for t in self.threads:
                status_icon = "🔴" if t.collapsed else "🟢"
                lines.append(f"| {t.thread_id} | {t.model} | {t.prompt_count} | "
                             f"{t.confidence_start:.2f}→{t.confidence_end:.2f} | "
                             f"{status_icon} {t.status.value} | {t.yield_score:.0%} |")
            lines.append("")

            if self.collapsed_thread_count > 0:
                lines.append("**Collapse Details:**")
                for t in self.threads:
                    if t.collapsed:
                        lines.append(f"- `{t.thread_id}`: {t.collapse_reason}")
                        lines.append(f"  - Yield: {t.yield_summary}")
                lines.append("")

        # Protocol Compliance
        lines.append("---")
        lines.append("")
        lines.append("## Protocol Compliance")
        lines.append("")
        if self.compliance_checks:
            lines.append("| Check | Status | Details |")
            lines.append("|-------|--------|---------|")
            for c in self.compliance_checks:
                # Safe lookup: the dict keys cover every ComplianceStatus value
                icon = {"passed": "✓", "warning": "⚠️", "failed": "✗", "skipped": "○"}[c.status.value]
                lines.append(f"| {c.check_name} | {icon} {c.status.value} | {c.details[:50] or '-'} |")
            lines.append("")

        lines.append(f"- **Escalations made**: {self.escalations_made}")
        lines.append(f"- **Escalations needed**: {self.escalations_needed}")
        lines.append(f"- **Phoenix checkpoints**: {self.phoenix_checkpoints}")
        lines.append("")

        # Decisions
        if self.decisions:
            lines.append("---")
            lines.append("")
            lines.append("## Decisions Made")
            lines.append("")
            lines.append("| Decision | Confidence | Chosen | Should Escalate? |")
            lines.append("|----------|------------|--------|------------------|")
            for d in self.decisions:
                escalate = "?" if d.should_have_escalated is None else ("Yes" if d.should_have_escalated else "No")
                lines.append(f"| {d.description[:40]} | {d.confidence:.0%} | {d.chosen_option} | {escalate} |")
            lines.append("")

        # Key Insights
        if self.key_insights:
            lines.append("---")
            lines.append("")
            lines.append("## Key Insights")
            lines.append("")
            for i, insight in enumerate(self.key_insights, 1):
                lines.append(f"{i}. {insight}")
            lines.append("")

        # Paths Not Taken
        if self.paths_not_taken:
            lines.append("---")
            lines.append("")
            lines.append("## Paths Not Taken")
            lines.append("")
            for path in self.paths_not_taken:
                lines.append(f"- {path}")
            lines.append("")

        # Recommendations
        if self.recommendations:
            lines.append("---")
            lines.append("")
            lines.append("## Recommendations for Next Session")
            lines.append("")
            for rec in self.recommendations:
                priority_icon = {1: "🔴", 2: "🟡", 3: "🟢"}.get(rec.priority, "○")
                lines.append(f"{priority_icon} **[{rec.category.upper()}]** {rec.description}")
            lines.append("")

        # Footer
        lines.append("---")
        lines.append("")
        lines.append(f"*Generated: {datetime.now().isoformat()} | Models: {', '.join(sorted(self.models_used))}*")

        return "\n".join(lines)

    def save(self, directory: Optional[Path] = None):
        """Save report to files (markdown + JSON); returns (md_path, json_path).

        Defaults to <repo>/sessions/close-down-reports relative to this module.
        """
        if directory is None:
            directory = Path(__file__).parent.parent.parent / "sessions" / "close-down-reports"

        directory.mkdir(parents=True, exist_ok=True)

        base_name = f"{self.session_id}-close-down"

        # Save markdown
        md_path = directory / f"{base_name}.md"
        md_path.write_text(self.to_markdown())

        # Save JSON (default=str handles datetime objects)
        json_path = directory / f"{base_name}.json"
        json_path.write_text(json.dumps(self.to_dict(), indent=2, default=str))

        return md_path, json_path


# =============================================================================
# FACTORY FUNCTIONS
# =============================================================================

def create_report_from_live_compression(live_compression_path: Path) -> CloseDownReport:
    """
    Create a CloseDownReport by parsing LIVE-COMPRESSION.md.

    This extracts session state from the phoenix file and initializes
    a report for finalization.
    """
    content = live_compression_path.read_text()

    # Extract session ID from filename or content
    session_id = "unknown"
    for line in content.split('\n'):
        if line.startswith('# Live Compression - '):
            session_id = line.replace('# Live Compression - ', '').strip()
            break

    report = CloseDownReport(session_id=session_id)

    # Parse metadata (best-effort: malformed values are skipped, not fatal)
    in_metadata = False
    for line in content.split('\n'):
        if '- **metadata**' in line:
            in_metadata = True
            continue
        if in_metadata:
            if line.startswith(' - '):
                if 'free_energy::' in line:
                    # Fix: the original bare `except:` also swallowed
                    # SystemExit/KeyboardInterrupt; only parse errors belong here.
                    try:
                        f_val = float(line.split('=')[1].split()[0])
                        report.f_end = f_val
                    except (ValueError, IndexError):
                        pass
                elif 'checkpoint::' in line:
                    try:
                        report.phoenix_checkpoints = int(line.split('::')[1].strip().split()[0])
                    except (ValueError, IndexError):
                        pass
            elif line.startswith('---'):
                in_metadata = False

    return report


# =============================================================================
# CLI
# =============================================================================

if __name__ == "__main__":
    # Demo report
    print("=== Close Down Report Demo ===\n")

    report = CloseDownReport(
        session_id="2026-01-15-building-steward",
        session_type="real",
        f_start=0.15
    )

    # Add some artifacts
    report.add_artifact(
        name="BuildingStewardLayer",
        artifact_type=ArtifactType.CODE,
        path="core/consciousness/first_officer.py",
        v_estimate=0.85,
        description="Pattern lifecycle tracking + architecture guidance"
    )
    report.artifacts[-1].v_actual = 0.80

    report.add_artifact(
        name="proto-024: Integration Over Separation",
        artifact_type=ArtifactType.PATTERN,
        path="patterns/integration-over-separation.md",
        v_estimate=0.60,
    )
    report.artifacts[-1].v_actual = 0.75

    report.add_artifact(
        name="phoenix_hygiene.py",
        artifact_type=ArtifactType.SCRIPT,
        path="scripts/phoenix_hygiene.py",
        v_estimate=0.70,
    )
    report.artifacts[-1].v_actual = 0.90

    # Record free energy changes
    report.record_free_energy("protocol", 0.12, notes="After typed resonance")
    report.record_free_energy("hygiene", 0.08, notes="After Building Steward")
    report.record_free_energy("alignment", 0.05, notes="After hygiene enforcement")

    # Add threads
    main = report.add_thread("main", model="opus", confidence_start=0.95)
    main.prompt_count = 15

    # Add compliance checks
    report.add_compliance_check("phoenix_staleness", ComplianceStatus.PASSED)
    report.add_compliance_check("phoenix_content", ComplianceStatus.PASSED)
    report.add_compliance_check("insight_backlog", ComplianceStatus.PASSED)

    # Add decisions
    report.add_decision(
        description="One steward or two?",
        confidence=0.75,
        options=["One steward with modes", "Two separate stewards"],
        chosen="One steward with modes",
        reasoning="Same Markov blanket, different orientations"
    )

    # Add insights
    report.key_insights = [
        "Integration over separation - prefer layers in existing systems",
        "Automatic mode detection - living systems respond to context",
        "Enforceable code > documentation suggestions",
    ]

    report.paths_not_taken = [
        "Separate Building Steward daemon",
        "Manual mode switching",
    ]

    report.add_recommendation("Continue pattern extraction from session work", priority=2, category="continue")
    report.add_recommendation("Test hygiene enforcement in speed run", priority=1, category="investigate")

    report.session_summary = "Implemented Building Steward, extracted meta-patterns, built enforceable hygiene checks."
    report.phoenix_checkpoints = 4

    # Finalize
    report.finalize()

    # Output
    print(report.to_markdown())