# justfile
   1  # Justfile for Aura project automation
   2  
# Default recipe: list all available recipes with their doc comments
default:
    @just --list
   6  
   7  # Generate docs/SUMMARY.md from Markdown files in docs/ and subfolders
   8  summary:
   9      #!/usr/bin/env bash
  10      set -euo pipefail
  11  
  12      docs="docs"
  13      build_dir="$docs/book"
  14      out="$docs/SUMMARY.md"
  15  
  16      echo "# Summary" > "$out"
  17      echo "" >> "$out"
  18  
  19      # Helper function to extract title from markdown file
  20      get_title() {
  21          local f="$1"
  22          local title
  23          title="$(grep -m1 '^# ' "$f" | sed 's/^# *//')"
  24          if [ -z "$title" ]; then
  25              local base="$(basename "${f%.*}")"
  26              title="$(printf '%s\n' "$base" \
  27                  | tr '._-' '   ' \
  28                  | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1')"
  29          fi
  30          echo "$title"
  31      }
  32  
  33      # Helper function to get chapter name from directory
  34      get_chapter_name() {
  35          local dir="$1"
  36          # Capitalize first letter of each word
  37          echo "$dir" | tr '_-' '  ' | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1'
  38      }
  39  
  40      # Collect all files, organized by directory
  41      declare -A dirs
  42      declare -a root_files
  43  
  44      while IFS= read -r f; do
  45          rel="${f#$docs/}"
  46  
  47          # Skip SUMMARY.md
  48          [ "$rel" = "SUMMARY.md" ] && continue
  49  
  50          # Skip files under the build output directory
  51          case "$f" in "$build_dir"/*) continue ;; esac
  52  
  53          # Check if file is in a subdirectory
  54          if [[ "$rel" == */* ]]; then
  55              # Extract directory name (first component of path)
  56              dir="${rel%%/*}"
  57              # Add file to this directory's list
  58              dirs[$dir]+="$f"$'\n'
  59          else
  60              # Root-level file
  61              root_files+=("$f")
  62          fi
  63      done < <(find "$docs" -type f -name '*.md' -not -name 'SUMMARY.md' -not -path "$build_dir/*" | LC_ALL=C sort)
  64  
  65      # Write root-level files first
  66      for f in "${root_files[@]}"; do
  67          rel="${f#$docs/}"
  68          title="$(get_title "$f")"
  69          echo "- [$title]($rel)" >> "$out"
  70      done
  71  
  72      # Write chapters (directories) with their files
  73      for dir in $(printf '%s\n' "${!dirs[@]}" | LC_ALL=C sort); do
  74          # Add blank line before chapter
  75          [ ${#root_files[@]} -gt 0 ] && echo "" >> "$out"
  76  
  77          # Add chapter heading
  78          chapter_name="$(get_chapter_name "$dir")"
  79          echo "# $chapter_name" >> "$out"
  80          echo "" >> "$out"
  81  
  82          # Add files in this directory
  83          while IFS= read -r f; do
  84              [ -z "$f" ] && continue
  85              rel="${f#$docs/}"
  86              title="$(get_title "$f")"
  87              echo "- [$title]($rel)" >> "$out"
  88          done < <(echo -n "${dirs[$dir]}" | LC_ALL=C sort)
  89      done
  90  
  91      echo "Wrote $out"
  92  
# Build the book after regenerating the summary.
# custom.css is created transiently (it hides <strong> elements inside
# sidebar chapter links) and removed by the nix command after the build.
# NOTE(review): if 'mdbook build' fails, the trailing 'rm -f' never runs
# and custom.css is left behind; 'just clean' removes it.
book: summary
    echo '.chapter-item a strong { display: none; }' > custom.css
    AURA_SUPPRESS_NIX_WELCOME=1 nix develop --quiet --command bash -c 'mdbook-mermaid install . > /dev/null 2>&1 || true && mdbook build && rm -f mermaid-init.js mermaid.min.js custom.css'
  97  
  98  # Serve locally with live reload
  99  serve-book: summary
 100      #!/usr/bin/env bash
 101      set -euo pipefail
 102  
 103      # Kill any existing mdbook servers
 104      if pgrep -x mdbook > /dev/null; then
 105          echo "Stopping existing mdbook server..."
 106          pkill mdbook
 107          sleep 1
 108      fi
 109  
 110      # Create custom.css transiently, cleanup when server stops
 111      echo '.chapter-item a strong { display: none; }' > custom.css
 112      trap 'rm -f mermaid-init.js mermaid.min.js custom.css' EXIT
 113      AURA_SUPPRESS_NIX_WELCOME=1 nix develop --quiet --command bash -c 'mdbook-mermaid install . > /dev/null 2>&1 || true && mdbook serve --open'
 114  
# Serve documentation with live reload (alias; delegates to serve-book)
serve: serve-book
 117  
# Build all crates
build:
    cargo build --workspace --verbose

# Build Aura terminal in development mode (release profile with dev features)
# Creates ./bin/aura wrapper for easy access
build-dev:
    cargo build -p aura-terminal --bin aura --features development --release
    mkdir -p bin
    cp scripts/aura-wrapper.sh bin/aura
    chmod +x bin/aura
    @echo "Binary available at: ./bin/aura"

# Build the app-host binary (aura-app with 'host' features, release profile)
build-app-host:
    cargo build -p aura-app --bin app-host --features host --release

# Build the terminal binary only (no default features, 'terminal' feature)
# and install the ./bin/aura wrapper, mirroring build-dev
build-terminal-release:
    cargo build -p aura-terminal --bin aura --release --no-default-features --features terminal
    mkdir -p bin
    cp scripts/aura-wrapper.sh bin/aura
    chmod +x bin/aura
    @echo "Binary available at: ./bin/aura"

# Build in release mode
build-release:
    cargo build --workspace --release --verbose

# Run all tests
test:
    cargo test --workspace --verbose

# Run all tests with output (--nocapture shows test stdout/stderr)
test-verbose:
    cargo test --workspace --verbose -- --nocapture

# Run tests for a specific crate, e.g. `just test-crate aura-core`
test-crate crate:
    cargo test -p {{crate}} --verbose
 156  
 157  # Run tests for a specific crate avoiding architectural violations
 158  test-crate-isolated crate:
 159      #!/usr/bin/env bash
 160      echo "Testing {{crate}} in isolation (lib + unit tests only)..."
 161      # Test just the library code with unit tests, avoiding dev dependencies that may violate architecture
 162      cd "crates/{{crate}}" && cargo test --lib --verbose
 163  
# Check code without building
check:
    cargo check --workspace --verbose

# Run the exact same lint command that Zed editor runs (rust-analyzer checkOnSave)
# This is exactly what Zed runs automatically on file save via rust-analyzer
check-zed:
    cargo check --workspace --all-targets

# Run clippy linter with effects system enforcement
# (-D warnings promotes every warning to an error)
clippy:
    cargo clippy --workspace --all-targets --verbose -- -D warnings

# Strict clippy check enforcing effects system usage via the
# disallowed_methods / disallowed_types lints
clippy-strict:
    cargo clippy --workspace --all-targets --verbose -- -D warnings -D clippy::disallowed_methods -D clippy::disallowed_types
 180  
 181  # Ensure Layer 4 crates do not reintroduce crate-level allow attributes
 182  check-layer4-lints:
 183      #!/usr/bin/env bash
 184      set -euo pipefail
 185      if rg -n "^#!\\[allow" \
 186          crates/aura-guards/src/lib.rs \
 187          crates/aura-consensus/src/lib.rs \
 188          crates/aura-amp/src/lib.rs \
 189          crates/aura-anti-entropy/src/lib.rs \
 190          crates/aura-protocol/src/lib.rs; then
 191          echo "crate-level #![allow] found in Layer 4 lib.rs; move to module scope"
 192          exit 1
 193      fi
 194  
# Test lint enforcement (should fail)
# NOTE(review): 'cargo check' does not normally accept a file-path
# argument like this -- verify this recipe still does what is intended.
lint-test:
    cargo check test_lints.rs

# Format code
fmt:
    cargo fmt --all

# Check code formatting without modifying files
fmt-check:
    cargo fmt --all -- --check

# Run security audit (requires cargo-audit)
audit:
    cargo audit
 210  
# Clean build artifacts and generated files
clean:
    #!/usr/bin/env bash
    set -euo pipefail
    echo "Cleaning build artifacts..."
    cargo clean

    echo "Cleaning Nix outputs..."
    rm -rf result result-*

    echo "Cleaning documentation builds..."
    rm -rf docs/book/
    # transient assets created by the book/serve-book recipes
    rm -f mermaid.min.js mermaid-init.js custom.css

    echo "Cleaning logs..."
    rm -rf logs/
    # unmatched globs pass through as literal names; rm -f ignores them,
    # so this is safe under set -e even when no *.log files exist
    rm -f *.log

    echo "Cleaning demo/test data..."
    rm -rf .aura-demo/ .aura-test/
    rm -rf outcomes/
    rm -f *.sealed *.dat *.tmp *.temp

    echo "Cleaning verification artifacts..."
    rm -rf verification/lean/.lake/
    rm -rf verification/lean/build/
    rm -rf verification/lean/Generated/
    rm -rf verification/lean/.lean_build/
    rm -rf verification/traces/
    rm -rf _apalache-out/

    echo "✓ Clean complete"
 243  
 244  # Clean everything including production data (use with caution!)
 245  clean-all: clean
 246      #!/usr/bin/env bash
 247      set -euo pipefail
 248      echo ""
 249      echo "WARNING: This will delete production data in .aura/"
 250      read -p "Are you sure? [y/N] " -n 1 -r
 251      echo
 252      if [[ $REPLY =~ ^[Yy]$ ]]; then
 253          echo "Removing production data..."
 254          rm -rf .aura/
 255          rm -rf secure_store/
 256          echo "✓ All data cleaned"
 257      else
 258          echo "Aborted."
 259      fi
 260  
# Watch and rebuild on changes (requires cargo-watch)
watch:
    cargo watch -x build

# Watch and run tests on changes (requires cargo-watch)
watch-test:
    cargo watch -x test

# Initialize a new account (Phase 0 smoke test):
# 3 devices, threshold 2, output under .aura
init-account:
    cargo run --bin aura -- init -n 3 -t 2 -o .aura

# Show account status using device 1's config
status:
    cargo run --bin aura -- status -c .aura/configs/device_1.toml
 276  
 277  # Test key derivation
 278  # test-dkd app_id context:
 279  #     cargo run --bin aura -- test-dkd --app-id {{app_id}} --context {{context}} -f .aura/configs/device_1.toml
 280  #     Note: Disabled - requires agent crate functionality
 281  
# Run Phase 0 smoke tests
smoke-test:
    #!/usr/bin/env bash
    set -euo pipefail

    echo "Running Phase 0 Smoke Tests"
    echo "==========================="
    echo ""

    # Clean previous test artifacts
    rm -rf .aura-test

    echo "1. Initializing 2-of-3 threshold account..."
    cargo run --bin aura -- init -n 3 -t 2 -o .aura-test
    echo "OK Account initialized"
    echo ""

    echo "2. Verifying effect_api and config files creation..."
    if [ -f ".aura-test/effect_api.cbor" ]; then
        echo "OK Effect API file created successfully"
        # stat -c is GNU (Linux); stat -f%z is the BSD/macOS fallback
        echo "Ledger size: $(stat -c%s .aura-test/effect_api.cbor 2>/dev/null || stat -f%z .aura-test/effect_api.cbor) bytes"
    else
        echo "ERROR: Ledger file not found"
        exit 1
    fi
    if [ -f ".aura-test/configs/device_1.toml" ]; then
        echo "OK Config file created successfully"
    else
        echo "ERROR: Config file not found"
        exit 1
    fi
    echo ""

    echo "3. Checking account status..."
    cargo run --bin aura -- status -c .aura-test/configs/device_1.toml
    echo "OK Status retrieved"
    echo ""

    echo "4. Testing multi-device threshold operations..."

    # Verify all 3 config files exist
    echo "   4.1 Verifying all device configs..."
    for i in 1 2 3; do
        if [ -f ".aura-test/configs/device_${i}.toml" ]; then
            echo "   [OK] Device ${i} config found"
        else
            echo "   ERROR: Device ${i} config not found"
            exit 1
        fi
    done
    echo "   OK All 3 device configs verified"

    # Test loading each device config
    echo "   4.2 Testing device config loading..."
    for i in 1 2 3; do
        echo "   Testing device ${i}..."
        if cargo run --bin aura -- status -c .aura-test/configs/device_${i}.toml > /dev/null 2>&1; then
            echo "   [OK] Device ${i} loaded successfully"
        else
            echo "   ERROR: Device ${i} failed to load"
            exit 1
        fi
    done
    echo "   OK All devices can load their configs"

    # Test starting agents on different ports
    echo "   4.3 Testing multi-device agents on different ports..."

    # Function to start an agent in background and capture PID.
    # Ports are 58834 + device number (58835..58837), so the three
    # agents never collide.  Writes the PID to .aura-test/agent_N.pid.
    start_agent() {
        local device_num=$1
        local port=$((58834 + device_num))
        local config_file=".aura-test/configs/device_${device_num}.toml"

        echo "   Starting device ${device_num} on port ${port}..."
        cargo run --bin aura -- node --port ${port} --daemon -c ${config_file} &
        local pid=$!
        echo ${pid} > ".aura-test/agent_${device_num}.pid"

        # Give agent time to start
        sleep 2

        # kill -0 sends no signal; it only probes that the PID is alive
        if kill -0 ${pid} 2>/dev/null; then
            echo "   [OK] Device ${device_num} agent started successfully (PID: ${pid})"
            return 0
        else
            echo "   ERROR: Device ${device_num} agent failed to start"
            return 1
        fi
    }

    # Function to stop all agents.  Idempotent: each PID file is removed
    # after use, so calling this again (e.g. from the EXIT trap after an
    # explicit call below) is a no-op.
    stop_all_agents() {
        echo "   Stopping all test agents..."
        for i in 1 2 3; do
            if [ -f ".aura-test/agent_${i}.pid" ]; then
                # NOTE(review): 'local pid=$(cat ...)' masks cat's exit
                # status under set -e; harmless here since the file was
                # just checked, but split decl/assign if this is touched.
                local pid=$(cat ".aura-test/agent_${i}.pid")
                if kill -0 ${pid} 2>/dev/null; then
                    kill ${pid}
                    echo "   [OK] Stopped agent ${i} (PID: ${pid})"
                fi
                rm -f ".aura-test/agent_${i}.pid"
            fi
        done
    }

    # Set up cleanup trap so agents are stopped on any exit path
    trap stop_all_agents EXIT

    # Start all three agents
    if start_agent 1 && start_agent 2 && start_agent 3; then
        echo "   OK All 3 agents started on different ports"

        # Wait a moment to ensure they're stable
        sleep 3

        # Verify agents are still running
        echo "   4.4 Verifying agents are stable..."
        all_running=true
        for i in 1 2 3; do
            if [ -f ".aura-test/agent_${i}.pid" ]; then
                pid=$(cat ".aura-test/agent_${i}.pid")
                if kill -0 ${pid} 2>/dev/null; then
                    echo "   [OK] Device ${i} agent still running"
                else
                    echo "   ERROR: Device ${i} agent stopped unexpectedly"
                    all_running=false
                fi
            else
                echo "   ERROR: Device ${i} PID file missing"
                all_running=false
            fi
        done

        if [ "$all_running" = true ]; then
            echo "   OK All agents are stable and running"
        else
            echo "   ERROR: Some agents are not stable"
            stop_all_agents
            exit 1
        fi

        # Clean stop of all agents
        stop_all_agents
        echo "   OK Agent startup/shutdown test completed"

        # Test threshold signature operation
        echo "   4.5 Testing 2-of-3 threshold signature operation..."

        # Test threshold signature with all 3 devices
        if cargo run --bin aura -- threshold --configs .aura-test/configs/device_1.toml,.aura-test/configs/device_2.toml,.aura-test/configs/device_3.toml --threshold 2 --mode local > /dev/null 2>&1; then
            echo "   [OK] 3-device threshold signature test passed"
        else
            echo "   ERROR: 3-device threshold signature test failed"
            exit 1
        fi

        # Test threshold signature with minimum required (2 devices)
        if cargo run --bin aura -- threshold --configs .aura-test/configs/device_1.toml,.aura-test/configs/device_2.toml --threshold 2 --mode local > /dev/null 2>&1; then
            echo "   [OK] 2-device minimum threshold test passed"
        else
            echo "   ERROR: 2-device minimum threshold test failed"
            exit 1
        fi

        echo "   OK Threshold signature operations verified"
    else
        echo "   ERROR: Failed to start all agents"
        stop_all_agents
        exit 1
    fi
    echo ""

    echo "5. Testing scenario discovery..."
    if [ -d "scenarios" ]; then
        cargo run --bin aura -- scenarios discover --root . > /dev/null 2>&1
        echo "OK Scenario discovery functional"
    else
        echo "SKIP No scenarios directory found"
    fi
    echo ""

    echo "Phase 0 smoke tests passed!"
    echo "Multi-device setup and basic operations functional!"
 467  
 468  # Run macOS Keychain integration tests
 469  test-macos-keychain:
 470      #!/usr/bin/env bash
 471      set -e
 472  
 473      echo "Aura macOS Keychain Integration Tests"
 474      echo "======================================"
 475      echo ""
 476  
 477      # Check that we're on macOS
 478      if [[ "$OSTYPE" != "darwin"* ]]; then
 479          echo "Error: These tests are designed for macOS only"
 480          echo "Current OS: $OSTYPE"
 481          exit 1
 482      fi
 483  
 484      # Check if we're in the right directory
 485      if [[ ! -f "Cargo.toml" ]] || [[ ! -d "crates/agent" ]]; then
 486          echo "Error: Please run this from the Aura project root directory"
 487          exit 1
 488      fi
 489  
 490      echo "Important Notes:"
 491      echo "  - These tests will interact with your macOS Keychain"
 492      echo "  - You may be prompted to allow keychain access"
 493      echo "  - Test data will be created and cleaned up automatically"
 494      echo "  - Some tests may require administrator permissions"
 495      echo ""
 496  
 497      # Prompt for confirmation
 498      read -p "Continue with keychain tests? (y/N): " -n 1 -r
 499      echo
 500      if [[ ! $REPLY =~ ^[Yy]$ ]]; then
 501          echo "Tests cancelled by user"
 502          exit 0
 503      fi
 504  
 505      echo "Starting macOS Keychain Tests..."
 506      echo ""
 507  
 508      # Set test environment variables
 509      export RUST_LOG=debug
 510      export RUST_BACKTRACE=1
 511  
 512      # Run the specific macOS keychain tests
 513      cargo test -p aura-agent --test macos_keychain_tests -- --nocapture --test-threads=1
 514  
 515      TEST_EXIT_CODE=$?
 516  
 517      echo ""
 518      if [ $TEST_EXIT_CODE -eq 0 ]; then
 519          echo "All macOS keychain tests passed!"
 520          echo ""
 521          echo "Secure Storage System Verification:"
 522          echo "  - Platform-specific keychain backend: Working"
 523          echo "  - Hardware UUID derivation: Working"
 524          echo "  - Device attestation with SIP: Working"
 525          echo "  - Key share encryption/decryption: Working"
 526          echo "  - Keychain persistence: Working"
 527          echo "  - Error handling: Working"
 528          echo ""
 529          echo "Your macOS keychain integration is ready for production use!"
 530      else
 531          echo "Some keychain tests failed"
 532          echo ""
 533          echo "Common Issues:"
 534          echo "  - Keychain access denied - grant permission when prompted"
 535          echo "  - System Integrity Protection disabled - enable SIP for security"
 536          echo "  - Missing dependencies - ensure all required crates are built"
 537          echo ""
 538          echo "Check the test output above for specific error details."
 539          exit $TEST_EXIT_CODE
 540      fi
 541  
 542      echo ""
 543      echo "Test Summary:"
 544      echo "  - Platform: macOS (Keychain Services)"
 545      echo "  - Encryption: AES-256-GCM with random nonces"
 546      echo "  - Authentication: Hardware-backed device attestation"
 547      echo "  - Storage: Persistent keychain with access control"
 548      echo "  - Integration: Complete workflow tested"
 549  
 550      echo ""
 551      echo "Security Features Verified:"
 552      echo "  - Hardware-backed key storage"
 553      echo "  - Platform-specific device identification"
 554      echo "  - System Integrity Protection detection"
 555      echo "  - Secure boot verification preparation"
 556      echo "  - Device attestation with Ed25519 signatures"
 557      echo "  - Keychain access control integration"
 558  
 559      echo ""
 560      echo "Next Steps:"
 561      echo "  - Run 'just init-account' to test with the new secure storage"
 562      echo "  - Use 'just status' to verify keychain integration"
 563      echo "  - Deploy with confidence knowing keys are hardware-protected"
 564  
# Check architectural layer compliance (all checks).
# The trailing '|| true' makes these recipes advisory: they succeed even
# when the checker reports violations, so they never fail a pipeline.
check-arch *FLAGS:
    scripts/check-arch.sh {{FLAGS}} || true

# Quick architectural checks by category (advisory, like check-arch)
check-arch-layers:
    scripts/check-arch.sh --layers || true

check-arch-effects:
    scripts/check-arch.sh --effects || true

check-arch-deps:
    scripts/check-arch.sh --deps || true

check-arch-completeness:
    scripts/check-arch.sh --completeness || true

check-arch-todos:
    scripts/check-arch.sh --todos || true

check-arch-concurrency:
    scripts/check-arch.sh --concurrency || true
 587  
 588  # Run CI checks locally (dry-run of GitHub CI workflow)
 589  ci-dry-run:
 590      #!/usr/bin/env bash
 591      set -euo pipefail
 592  
 593      echo "Running CI Dry-Run (Local GitHub Workflow Simulation)"
 594      echo "======================================================"
 595      echo ""
 596  
 597      # Colors for output
 598      GREEN='\033[0;32m'
 599      RED='\033[0;31m'
 600      YELLOW='\033[0;33m'
 601      NC='\033[0m' # No Color
 602  
 603      exit_code=0
 604  
 605      # Clean ALL build artifacts to ensure fresh compilation (matches GitHub CI behavior)
 606      echo "Cleaning build cache to ensure fresh compilation..."
 607      cargo clean
 608      echo ""
 609  
 610      # 1. Format Check
 611      echo "[1/8] Running Format Check..."
 612      if cargo fmt --all -- --check; then
 613          echo -e "${GREEN}[OK]${NC} Format check passed"
 614      else
 615          echo -e "${RED}[FAIL]${NC} Format check failed"
 616          exit_code=1
 617      fi
 618      echo ""
 619  
 620      # 2. Clippy with Effects Enforcement
 621      echo "[2/8] Running Clippy with Effects Enforcement..."
 622      if cargo clippy --workspace --all-targets --verbose -- \
 623          -D warnings \
 624          -D clippy::disallowed_methods \
 625          -D clippy::disallowed_types \
 626          -D clippy::unwrap_used \
 627          -D clippy::expect_used \
 628          -D clippy::duplicated_attributes; then
 629          echo -e "${GREEN}[OK]${NC} Clippy check passed"
 630      else
 631          echo -e "${RED}[FAIL]${NC} Clippy check failed"
 632          exit_code=1
 633      fi
 634      echo ""
 635  
 636      # 3. Test Suite
 637      echo "[3/8] Running Test Suite..."
 638      if cargo test --workspace --verbose; then
 639          echo -e "${GREEN}[OK]${NC} Test suite passed"
 640      else
 641          echo -e "${RED}[FAIL]${NC} Test suite failed"
 642          exit_code=1
 643      fi
 644      echo ""
 645  
 646      # 4. Check for Effects System Violations (Layer-Aware)
 647      echo "[4/8] Checking for Effects System Violations..."
 648      violations_found=0
 649  
 650      # Layer architecture:
 651      # - Layer 3 (aura-effects): Production handlers - MUST use SystemTime::now(), thread_rng()
 652      # - Layer 6 (aura-agent, aura-simulator): Runtime composition - allowed for instrumentation
 653      # - Layer 7 (aura-terminal): User interface - allowed for TUI/CLI interaction
 654      # - Layer 8 (aura-testkit, tests/): Testing infrastructure - allowed
 655      # - All other layers: MUST use effect traits
 656  
 657      # Check for direct time usage (exclude Layer 3, 6, 7, 8, integration tests, demo code, CLI scenarios, and test modules)
 658      # Note: May include false positives from code in comments or test modules
 659      time_violations=$(rg --type rust "SystemTime::now|Instant::now|chrono::Utc::now" crates/ --line-number \
 660          --glob '!**/aura-effects/**' \
 661          --glob '!**/aura-agent/**' \
 662          --glob '!**/aura-simulator/**' \
 663          --glob '!**/aura-terminal/**' \
 664          --glob '!**/aura-testkit/**' \
 665          --glob '!**/tests/**' \
 666          --glob '!**/integration/**' \
 667          --glob '!**/demo/**' \
 668          --glob '!**/examples/**' 2>/dev/null | \
 669          grep -v '^\s*//\|^\s\+//\|:\s*//' | \
 670          grep -v "#\[tokio::test\]" | \
 671          grep -v "#\[test\]" || true)
 672  
 673      # Filter out lines from files with #[cfg(test)] modules
 674      if [ -n "$time_violations" ]; then
 675          filtered_time=""
 676          while IFS= read -r line; do
 677              [ -z "$line" ] && continue
 678              file_path="${line%%:*}"
 679              if [ -f "$file_path" ] && grep -q "#\[cfg(test)\]" "$file_path" 2>/dev/null; then
 680                  match_line_num=$(echo "$line" | cut -d: -f2)
 681                  cfg_test_line=$(grep -n "#\[cfg(test)\]" "$file_path" 2>/dev/null | head -1 | cut -d: -f1)
 682                  if [ -n "$match_line_num" ] && [ -n "$cfg_test_line" ] && [ "$match_line_num" -gt "$cfg_test_line" ]; then
 683                      continue
 684                  fi
 685              fi
 686              filtered_time="${filtered_time}${line}"$'\n'
 687          done <<< "$time_violations"
 688          time_violations="$filtered_time"
 689      fi
 690  
 691      if [ -n "$time_violations" ] && [ "$time_violations" != $'\n' ]; then
 692          echo "$time_violations"
 693          echo -e "${RED}[ERROR]${NC} Found direct time usage in application code! Use PhysicalTimeEffects::now() instead."
 694          violations_found=1
 695      fi
 696  
 697      # Check for direct randomness usage (exclude Layer 3, 6, 7, 8, integration tests, and demo code)
 698      if rg --type rust "rand::random|thread_rng\(\)|OsRng::new" crates/ --line-number \
 699          --glob '!**/aura-effects/**' \
 700          --glob '!**/aura-agent/**' \
 701          --glob '!**/aura-simulator/**' \
 702          --glob '!**/aura-terminal/**' \
 703          --glob '!**/aura-testkit/**' \
 704          --glob '!**/tests/**' \
 705          --glob '!**/integration/**' \
 706          --glob '!**/demo/**' \
 707          --glob '!**/examples/**' 2>/dev/null; then
 708          echo -e "${RED}[ERROR]${NC} Found direct randomness usage in application code! Use RandomEffects methods instead."
 709          violations_found=1
 710      fi
 711  
 712      # Check for direct UUID usage (exclude Layer 3, 6, 7, 8, integration tests, demo code, ID constructors, property tests, and test modules)
 713      uuid_violations=$(rg --type rust "Uuid::new_v4\(\)" crates/ --line-number \
 714          --glob '!**/aura-effects/**' \
 715          --glob '!**/aura-agent/**' \
 716          --glob '!**/aura-simulator/**' \
 717          --glob '!**/aura-testkit/**' \
 718          --glob '!**/aura-quint/**' \
 719          --glob '!**/aura-terminal/**' \
 720          --glob '!**/tests/**' \
 721          --glob '!**/integration/**' \
 722          --glob '!**/demo/**' \
 723          --glob '!**/tui/**' \
 724          --glob '!**/examples/**' \
 725          --glob '!**/aura-core/src/types/identifiers.rs' \
 726          --glob '!**/aura-core/src/effects/quint.rs' \
 727          --glob '!**/aura-composition/src/registry.rs' \
 728          --glob '!**/aura-sync/src/services/maintenance.rs' \
 729          --glob '!**/aura-sync/src/infrastructure/peers.rs' 2>/dev/null | \
 730          grep -v '^\s*//\|^\s\+//\|:\s*//' | \
 731          grep -v "#\[tokio::test\]" | \
 732          grep -v "#\[test\]" || true)
 733  
 734      # Filter out lines from files with #[cfg(test)] modules
 735      if [ -n "$uuid_violations" ]; then
 736          filtered_uuid=""
 737          while IFS= read -r line; do
 738              [ -z "$line" ] && continue
 739              file_path="${line%%:*}"
 740              if [ -f "$file_path" ] && grep -q "#\[cfg(test)\]" "$file_path" 2>/dev/null; then
 741                  match_line_num=$(echo "$line" | cut -d: -f2)
 742                  cfg_test_line=$(grep -n "#\[cfg(test)\]" "$file_path" 2>/dev/null | head -1 | cut -d: -f1)
 743                  if [ -n "$match_line_num" ] && [ -n "$cfg_test_line" ] && [ "$match_line_num" -gt "$cfg_test_line" ]; then
 744                      continue
 745                  fi
 746              fi
 747              filtered_uuid="${filtered_uuid}${line}"$'\n'
 748          done <<< "$uuid_violations"
 749          uuid_violations="$filtered_uuid"
 750      fi
 751  
 752      if [ -n "$uuid_violations" ] && [ "$uuid_violations" != $'\n' ]; then
 753          echo "$uuid_violations"
 754          echo -e "${RED}[ERROR]${NC} Found direct UUID usage in application code! Use RandomEffects::random_uuid() instead."
 755          violations_found=1
 756      fi
 757  
 758      if [ $violations_found -eq 0 ]; then
 759          echo -e "${GREEN}[OK]${NC} No effects system violations found"
 760      else
 761          echo ""
 762          echo -e "${YELLOW}Note:${NC} Layer 1 ID constructors (aura-core/identifiers.rs), Layer 3 (aura-effects), Layer 6 (aura-agent, aura-simulator), Layer 7 (aura-terminal), Layer 8 (aura-testkit, tests/), property tests (aura-quint), demo/TUI code, operation ID generation (aura-composition/registry.rs), and sync service IDs (aura-sync/services, aura-sync/infrastructure) are exempt."
 763          exit_code=1
 764      fi
 765      echo ""
 766  
 767      # 5. Documentation Links Check
 768      echo "[5/8] Checking Documentation Links..."
 769      # Install markdown-link-check if not available
 770      if ! command -v markdown-link-check &> /dev/null; then
 771          echo "Installing markdown-link-check..."
 772          npm install -g markdown-link-check > /dev/null 2>&1
 773      fi
 774  
 775      doc_errors=0
 776      doc_output=$(mktemp)
 777  
 778      # Find all markdown files recursively (matching GitHub CI behavior)
 779      # Exclude node_modules, target, and .git directories
 780      while IFS= read -r file; do
 781          if [ -f "$file" ]; then
 782              if ! markdown-link-check "$file" --config .github/config/markdown-link-check.json 2>&1 | tee -a "$doc_output" | grep -q "ERROR:"; then
 783                  :
 784              else
 785                  echo -e "${RED}Broken links found in $file${NC}"
 786                  doc_errors=1
 787              fi
 788          fi
 789      done < <(find . -name "*.md" -type f \
 790          ! -path "*/node_modules/*" \
 791          ! -path "*/target/*" \
 792          ! -path "*/.git/*" \
 793          ! -path "*/.aura-test/*" \
 794          ! -path "*/ext/quint/*" \
 795          ! -path "*/work/*" \
 796          ! -path "*/.claude/skills/*")
 797  
 798      if [ $doc_errors -eq 0 ]; then
 799          echo -e "${GREEN}[OK]${NC} Documentation links check passed"
 800      else
 801          echo -e "${RED}[FAIL]${NC} Documentation links check failed"
 802          echo "See errors above for details"
 803          exit_code=1
 804      fi
 805      rm -f "$doc_output"
 806      echo ""
 807  
 808      # 6. Build Check
 809      echo "[6/8] Running Build Check..."
 810      if cargo build --workspace --verbose; then
 811          echo -e "${GREEN}[OK]${NC} Build check passed"
 812      else
 813          echo -e "${RED}[FAIL]${NC} Build check failed"
 814          exit_code=1
 815      fi
 816      echo ""
 817  
 818      # 7. Unused Dependencies Check
 819      echo "[7/8] Checking for Unused Dependencies (cargo-udeps)..."
 820      echo "Using nightly Rust toolchain for cargo-udeps..."
 821  
 822      # Run cargo-udeps using the nightly shell from flake.nix
 823      if nix develop .#nightly --command cargo udeps --all-targets 2>&1 | tee /tmp/udeps-output.txt | grep -q "unused dependencies:"; then
 824          # Found unused dependencies - show them
 825          echo -e "${YELLOW}[WARNING]${NC} Found unused dependencies:"
 826          grep -A 100 "unused dependencies:" /tmp/udeps-output.txt | head -50
 827          echo ""
 828          echo -e "${YELLOW}Note:${NC} cargo-udeps may report false positives for:"
 829          echo "  - Dependencies used in macro-generated code"
 830          echo "  - Dependencies used only in doc-tests"
 831          echo "  - Re-exported dependencies in public APIs"
 832          echo ""
 833          echo "Review the output above and verify these are actual unused dependencies."
 834          echo "This is a WARNING, not a failure - CI will continue."
 835      else
 836          echo -e "${GREEN}[OK]${NC} No unused dependencies found (or only known false positives)"
 837      fi
 838      rm -f /tmp/udeps-output.txt
 839      echo ""
 840  
 841      # 8. Summary
 842      echo "[8/8] Summary"
 843      echo "======================================================"
 844      if [ $exit_code -eq 0 ]; then
 845          echo -e "${GREEN}All CI checks passed!${NC}"
 846          echo "Ready to submit PR - matches GitHub CI requirements"
 847      else
 848          echo -e "${RED}Some CI checks failed!${NC}"
 849          echo "Please fix the issues above before submitting PR"
 850          exit $exit_code
 851      fi
 852  
# (All three recipes below run quint inside the Nix dev shell; `@`-prefixed
# lines are not echoed by just. Usage: just quint-parse <in.qnt> <out.json>)
# Parse Quint file to JSON using native parser
quint-parse input output:
    @echo "Parsing Quint file to JSON..."
    @echo "Input: {{input}}"
    @echo "Output: {{output}}"
    nix develop --command quint parse --out {{output}} {{input}}
    @echo "Parse completed successfully!"

# (Writes the AST to /tmp/quint-ast.json and summarizes it with jq.)
# Parse Quint file and display AST structure
quint-parse-ast input:
    @echo "Parsing Quint file AST..."
    nix develop --command quint parse --out /tmp/quint-ast.json {{input}}
    @echo "AST structure for {{input}}:"
    @echo "============================"
    jq '.modules[0].name as $name | "Module: " + $name' /tmp/quint-ast.json
    jq '.modules[0].declarations | length as $count | "Declarations: " + ($count | tostring)' /tmp/quint-ast.json
    @echo ""
    @echo "Full AST available at: /tmp/quint-ast.json"

# (Typechecks first so type errors abort before the JSON is written.)
# Parse Quint file with type checking and compile to JSON
quint-compile input output:
    @echo "Compiling Quint file with full type checking..."
    @echo "Input: {{input}}"
    @echo "Output: {{output}}"
    nix develop --command quint typecheck {{input}}
    nix develop --command quint parse --out {{output}} {{input}}
    @echo "Compilation completed successfully!"
 880  
# (Seed and step bound are forwarded to the generator/checker scripts via the
# TUI_ITF_SEED / TUI_ITF_MAX_STEPS environment variables.)
# Regenerate deterministic ITF trace for the TUI replay tests
tui-itf-trace out="verification/traces/tui_trace.itf.json" seed="424242" max_steps="50":
    TUI_ITF_SEED={{seed}} TUI_ITF_MAX_STEPS={{max_steps}} nix develop --command scripts/gen-tui-itf-trace.sh {{out}}

# Check that the checked-in ITF trace matches regeneration
tui-itf-trace-check seed="424242" max_steps="50":
    TUI_ITF_SEED={{seed}} TUI_ITF_MAX_STEPS={{max_steps}} nix develop --command scripts/check-tui-itf-trace.sh
 888  
# (Writes a throwaway spec under .aura-test/ and exercises the quint-parse and
# quint-parse-ast recipes against it end to end.)
# Test Quint parsing with example file
test-quint-parse:
    #!/usr/bin/env bash
    set -euo pipefail

    echo "Testing Quint Parsing Capabilities"
    echo "==================================="
    echo ""

    # Create a test Quint file
    # The quoted 'EOF' delimiter disables shell expansion, so the spec body
    # is written verbatim.
    mkdir -p .aura-test
    cat > .aura-test/test.qnt << 'EOF'
    module test {
      var counter: int

      action init = {
        counter' = 0
      }

      action increment = {
        counter' = counter + 1
      }

      action step = {
        increment
      }

      val counterInvariant = counter >= 0
    }
    EOF

    echo "1. Created test Quint file: .aura-test/test.qnt"
    echo ""

    echo "2. Parsing to JSON..."
    just quint-parse .aura-test/test.qnt .aura-test/test.json
    echo ""

    # Inspect the parser output with jq to confirm it is well-formed JSON
    # with the expected top-level structure.
    echo "3. Examining parsed structure..."
    echo "Main module:"
    jq '.main' .aura-test/test.json
    echo ""
    echo "Module declarations count:"
    jq '.modules[0].declarations | length' .aura-test/test.json
    echo ""

    echo "4. Testing AST parsing..."
    just quint-parse-ast .aura-test/test.qnt
    echo ""

    echo "5. Files generated:"
    ls -la .aura-test/test.*
    echo ""

    echo "Quint parsing test completed successfully!"
    echo "JSON output available at: .aura-test/test.json"
 945  
 946  # Verify Quint setup and parsing capabilities
 947  verify-quint:
 948      #!/usr/bin/env bash
 949      set -euo pipefail
 950  
 951      echo "Verifying Quint Setup"
 952      echo "====================="
 953      echo ""
 954  
 955      echo "1. Checking Quint installation..."
 956      nix develop --command quint --version
 957      echo ""
 958  
 959      echo "2. Checking Node.js (required for Quint)..."
 960      nix develop --command node --version
 961      echo ""
 962  
 963      echo "3. Checking Java Runtime (required for ANTLR)..."
 964      nix develop --command java -version
 965      echo ""
 966  
 967      echo "4. Testing basic Quint functionality..."
 968      echo 'module simple { val x = 1 }' > /tmp/simple.qnt
 969      nix develop --command quint parse /tmp/simple.qnt > /dev/null && echo "[OK] Basic parsing works"
 970      echo ""
 971  
 972      echo "Quint setup verification completed!"
 973  
 974  # Typecheck all Quint specs without verification (fast sanity check)
 975  quint-typecheck-all:
 976      #!/usr/bin/env bash
 977      set -uo pipefail
 978  
 979      GREEN='\033[0;32m'
 980      RED='\033[0;31m'
 981      YELLOW='\033[1;33m'
 982      NC='\033[0m'
 983  
 984      echo "Typechecking All Quint Specs"
 985      echo "============================"
 986      echo ""
 987  
 988      SPECS_DIR="verification/quint"
 989      TEST_SPECS_DIR="crates/aura-simulator/tests/quint_specs"
 990  
 991      passed=0
 992      failed=0
 993  
 994      # Typecheck verification/quint/
 995      echo "Checking specs in $SPECS_DIR..."
 996      for spec in $SPECS_DIR/protocol_*.qnt $SPECS_DIR/harness_*.qnt; do
 997          if [ -f "$spec" ]; then
 998              name=$(basename "$spec")
 999              if quint typecheck "$spec" > /dev/null 2>&1; then
1000                  echo -e "  ${GREEN}✓${NC} $name"
1001                  passed=$((passed + 1))
1002              else
1003                  echo -e "  ${RED}✗${NC} $name"
1004                  failed=$((failed + 1))
1005              fi
1006          fi
1007      done
1008  
1009      # Typecheck test specs
1010      echo ""
1011      echo "Checking specs in $TEST_SPECS_DIR..."
1012      for spec in $TEST_SPECS_DIR/*.qnt; do
1013          if [ -f "$spec" ]; then
1014              name=$(basename "$spec")
1015              if quint typecheck "$spec" > /dev/null 2>&1; then
1016                  echo -e "  ${GREEN}✓${NC} $name"
1017                  passed=$((passed + 1))
1018              else
1019                  echo -e "  ${RED}✗${NC} $name"
1020                  failed=$((failed + 1))
1021              fi
1022          fi
1023      done
1024  
1025      echo ""
1026      echo "=============================="
1027      echo -e "Passed: ${GREEN}$passed${NC}"
1028      echo -e "Failed: ${RED}$failed${NC}"
1029  
1030      if [ $failed -gt 0 ]; then
1031          echo -e "${RED}Some specs failed typecheck!${NC}"
1032          exit 1
1033      else
1034          echo -e "${GREEN}All specs passed typecheck!${NC}"
1035      fi
1036  
# Verify Quint specs with Apalache symbolic model checking
# Usage: just quint-verify-all              # Run with defaults
# Usage: just quint-verify-all 5            # Max 5 steps
# Usage: just quint-verify-all 10 traces    # Max 10 steps, output to traces/
quint-verify-all max_steps="5" output_dir="traces/verify":
    #!/usr/bin/env bash
    # No -e: per-spec failures are counted and reported in the summary.
    set -uo pipefail

    GREEN='\033[0;32m'
    RED='\033[0;31m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    MAX_STEPS="{{max_steps}}"
    OUTPUT_DIR="{{output_dir}}"

    echo "Quint + Apalache Verification"
    echo "============================="
    echo "Max steps: $MAX_STEPS"
    echo "Output dir: $OUTPUT_DIR"
    echo ""

    mkdir -p "$OUTPUT_DIR"

    # Specs with their invariants to verify
    # Format: spec_file:invariant1,invariant2,...
    VERIFY_TARGETS=(
        # Journal CRDT properties (proving.md §1)
        "verification/quint/protocol_journal.qnt:InvariantNonceUnique,InvariantEventsOrdered,InvariantLamportMonotonic,InvariantReduceDeterministic"

        # Consensus fast-path/fallback (proving.md §1)
        "verification/quint/protocol_consensus.qnt:InvariantUniqueCommitPerInstance,InvariantCommitRequiresThreshold,InvariantPathConvergence"

        # Anti-entropy convergence (proving.md §3)
        "verification/quint/protocol_anti_entropy.qnt:InvariantFactsMonotonic,InvariantVectorClockConsistent,InvariantEventualConvergence"

        # Recovery safety (proving.md §4)
        "verification/quint/protocol_recovery.qnt:InvariantThresholdWithinBounds,InvariantApprovalsSubsetGuardians,InvariantPhaseConsistency"

        # Session management
        "verification/quint/protocol_sessions.qnt:InvariantAuthoritiesRegisteredSessions,InvariantRevokedInactive"
    )

    passed=0
    failed=0
    skipped=0

    for target in "${VERIFY_TARGETS[@]}"; do
        IFS=':' read -r spec invariants <<< "$target"
        spec_name=$(basename "$spec" .qnt)

        if [ ! -f "$spec" ]; then
            echo -e "${YELLOW}SKIP${NC} $spec_name (file not found)"
            skipped=$((skipped + 1))
            continue
        fi

        echo ""
        echo "Verifying $spec_name..."
        echo "  Invariants: $invariants"

        # Build one --invariant flag per comma-separated invariant. An array
        # (rather than an unquoted word-split string) keeps each flag a single
        # word regardless of IFS or globbing settings.
        inv_flags=()
        IFS=',' read -ra INV_ARRAY <<< "$invariants"
        for inv in "${INV_ARRAY[@]}"; do
            inv_flags+=("--invariant=$inv")
        done

        output_file="$OUTPUT_DIR/${spec_name}_verify.json"

        # Run quint verify with Apalache
        if quint verify "$spec" \
            "${inv_flags[@]}" \
            --max-steps="$MAX_STEPS" \
            --out-itf="$OUTPUT_DIR/${spec_name}_counter.itf.json" \
            > "$output_file" 2>&1; then
            echo -e "  ${GREEN}✓ PASS${NC}"
            passed=$((passed + 1))
        else
            # Check if it's a real failure or just a limitation
            if grep -q "no violation found" "$output_file" 2>/dev/null; then
                echo -e "  ${GREEN}✓ PASS${NC} (no violation in $MAX_STEPS steps)"
                passed=$((passed + 1))
            elif grep -q "Apalache" "$output_file" 2>/dev/null; then
                echo -e "  ${RED}✗ FAIL${NC}"
                echo "    Counterexample: $OUTPUT_DIR/${spec_name}_counter.itf.json"
                echo "    Full output: $output_file"
                failed=$((failed + 1))
            else
                echo -e "  ${YELLOW}? ERROR${NC} (see $output_file)"
                skipped=$((skipped + 1))
            fi
        fi
    done

    echo ""
    echo "=============================="
    echo -e "Passed:  ${GREEN}$passed${NC}"
    echo -e "Failed:  ${RED}$failed${NC}"
    echo -e "Skipped: ${YELLOW}$skipped${NC}"
    echo ""
    echo "Counterexamples (if any) saved to: $OUTPUT_DIR/"

    if [ $failed -gt 0 ]; then
        echo -e "${RED}Some invariants were violated!${NC}"
        exit 1
    else
        echo -e "${GREEN}All verified invariants hold (up to $MAX_STEPS steps)${NC}"
    fi
1146  
# Verify a single Quint spec with specific invariants
# Usage: just quint-verify verification/quint/protocol_journal.qnt InvariantNonceUnique
quint-verify spec invariants:
    #!/usr/bin/env bash
    set -uo pipefail

    echo "Verifying {{spec}}..."
    echo "Invariants: {{invariants}}"
    echo ""

    mkdir -p verification/traces
    SPEC_NAME=$(basename "{{spec}}" .qnt)

    # Build one --invariant flag per comma-separated invariant. An array
    # (rather than an unquoted word-split string) keeps each flag a single
    # word regardless of IFS or globbing settings.
    INV_FLAGS=()
    IFS=',' read -ra INV_ARRAY <<< "{{invariants}}"
    for inv in "${INV_ARRAY[@]}"; do
        INV_FLAGS+=("--invariant=$inv")
    done

    # Verbose output and a counterexample ITF trace for debugging failures.
    quint verify "{{spec}}" \
        "${INV_FLAGS[@]}" \
        --max-steps=10 \
        --out-itf="verification/traces/${SPEC_NAME}_counter.itf.json" \
        --verbosity=3
1172  
# Generate ITF traces from all Quint specs
quint-generate-traces:
    #!/usr/bin/env bash
    # No -e: a spec that fails to run is reported and the loop continues.
    set -uo pipefail

    GREEN='\033[0;32m'
    RED='\033[0;31m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    printf '%s\n' "Generating ITF Traces from Quint Specs" "======================================" ""

    mkdir -p verification/traces

    # Specs that have step actions defined, as "<spec path>:<trace name>".
    RUNNABLE_SPECS=(
        "verification/quint/protocol_journal.qnt:journal"
        "verification/quint/protocol_consensus.qnt:consensus"
        "verification/quint/protocol_anti_entropy.qnt:anti_entropy"
        "verification/quint/protocol_epochs.qnt:epochs"
        "verification/quint/protocol_cross_interaction.qnt:cross_interaction"
        "verification/quint/protocol_capability_properties.qnt:cap_props"
        "verification/quint/protocol_frost.qnt:frost"
    )

    for entry in "${RUNNABLE_SPECS[@]}"; do
        # Split on the first/last ':' via parameter expansion.
        spec_path="${entry%%:*}"
        trace_name="${entry##*:}"

        if [ ! -f "$spec_path" ]; then
            echo -e "${YELLOW}SKIP${NC} $spec_path (not found)"
            continue
        fi

        trace_file="verification/traces/${trace_name}.itf.json"
        echo "Generating trace for $trace_name..."
        if quint run "$spec_path" \
            --main=step \
            --max-steps=15 \
            --out-itf="$trace_file" \
            > /dev/null 2>&1; then
            size=$(du -h "$trace_file" | cut -f1)
            echo -e "  ${GREEN}✓${NC} $trace_file ($size)"
        else
            echo -e "  ${RED}✗${NC} Failed to generate trace"
        fi
    done

    printf '%s\n' "" "Trace generation complete. Files in verification/traces/"
    ls -la verification/traces/*.itf.json 2>/dev/null || echo "No traces generated"
1224  
# Check Quint-Rust type correspondence
# Detects drift between Quint type definitions and Rust QuintMappable implementations
quint-check-types verbose="":
    #!/usr/bin/env bash
    set -uo pipefail
    # Delegates to the checker script; the optional `verbose` argument is
    # passed through verbatim (e.g. a --verbose flag).
    ./scripts/check-quint-rust-types.sh {{verbose}}

# Generate verification coverage report
# Usage: just verification-coverage        # Markdown to stdout
# Usage: just verification-coverage --json # JSON metrics
verification-coverage format="--md":
    #!/usr/bin/env bash
    set -uo pipefail
    # Delegates to the coverage script; `format` selects the output flavor.
    ./scripts/verification-coverage.sh {{format}}
1239  
# Execute any aura CLI command with nix build
# (Build and run are separate `nix develop` invocations so compile errors
# surface before the CLI runs; AURA_SUPPRESS_NIX_WELCOME silences the shell
# banner in both.)
# Usage: just aura init -n 3 -t 2 -o test-account
# Usage: just aura status -c test-account/configs/device_1.toml
# Usage: just aura scenarios list
aura *ARGS='--help':
    @AURA_SUPPRESS_NIX_WELCOME=1 nix develop --quiet --command cargo build --bin aura
    @AURA_SUPPRESS_NIX_WELCOME=1 nix develop --quiet --command cargo run --bin aura -- {{ARGS}}
1247  
# Generate Cargo.nix using crate2nix (needed for hermetic Nix builds)
generate-cargo-nix:
    #!/usr/bin/env bash
    set -euo pipefail
    # Re-derive Cargo.nix inside the dev shell so the hermetic Nix build
    # graph stays in sync with the workspace dependencies.
    printf '%s\n' "Regenerating Cargo.nix with crate2nix..."
    nix develop --command crate2nix generate
    printf '%s\n' "Cargo.nix regenerated successfully!" "" "Run 'nix build .#aura-terminal' to test hermetic build"
1257  
# (If dependencies changed, regenerate Cargo.nix first: just generate-cargo-nix)
# Build using hermetic Nix build (requires Cargo.nix to exist)
build-nix:
    nix build .#aura-terminal

# Build specific package with hermetic Nix
build-nix-package package:
    nix build .#{{package}}

# Run hermetic Nix checks
check-nix:
    nix flake check
1269  
# Test hermetic build of all available packages
test-nix-all:
    #!/usr/bin/env bash
    set -euo pipefail
    # Build each flake package in turn; set -e aborts on the first failure.
    printf '%s\n' "Testing all hermetic Nix builds..." "==================================" ""

    packages=(aura-terminal aura-agent aura-simulator)
    step=1
    for pkg in "${packages[@]}"; do
        printf '%s\n' "${step}. Building ${pkg}..."
        nix build ".#${pkg}"
        printf '%s\n' "[OK] ${pkg} built successfully"
        step=$((step + 1))
    done

    printf '%s\n' "" "All hermetic builds completed successfully!"
1292  
# (Builds rustdoc for workspace crates only and opens it in the browser.)
# Generate documentation
docs:
    cargo doc --workspace --no-deps --open

# (Runs in the foreground; stop with Ctrl+C or `just stop-console`.)
# Watch and serve the console frontend with hot reload
serve-console:
    cd crates/console && trunk serve --open
1300  
# Stop any running trunk servers
stop-console:
    #!/usr/bin/env bash
    # Match the process name exactly (-x) in both the check and the kill so
    # unrelated processes whose names merely contain "trunk" are untouched.
    # (Previously the check used `pgrep -x` but the kill used a substring
    # `pkill trunk` — inconsistent and potentially over-broad.)
    if pgrep -x trunk > /dev/null; then
        pkill -x trunk
        echo "Stopped trunk server"
    else
        echo "No trunk server running"
    fi
1310  
# (Counts below are rough heuristics built from find/ls/cargo-tree output —
# useful for trends, not exact metrics.)
# Show project statistics
stats:
    @echo "Project Statistics"
    @echo "=================="
    @echo ""
    @echo "Lines of Rust code:"
    @find crates -name "*.rs" -type f -exec cat {} + | wc -l
    @echo ""
    @echo "Number of crates:"
    @ls -1 crates | wc -l
    @echo ""
    @echo "Dependencies:"
    @cargo tree --workspace --depth 1 | grep -v "└──" | grep -v "├──" | tail -n +2 | wc -l
1324  
# (Overwrites any existing pre-commit hook; the hook runs fmt and clippy
# checks. `mkdir -p` guards against a missing .git/hooks directory, e.g. in
# worktrees or clones created without hook templates.)
# Install git hooks
install-hooks:
    @echo "Installing git hooks..."
    @mkdir -p .git/hooks
    @echo "#!/usr/bin/env bash" > .git/hooks/pre-commit
    @echo "just fmt-check && just clippy-strict" >> .git/hooks/pre-commit
    @chmod +x .git/hooks/pre-commit
    @echo "Git hooks installed"
1332  
# Build WASM module for db-test
build-wasm-db-test:
    #!/usr/bin/env bash
    set -euo pipefail

    printf '%s\n' "Building Datafrog WASM module..." "================================" ""

    # wasm-pack writes the generated JS/WASM bundle into web/pkg for the
    # static test page to load.
    cd crates/db-test
    wasm-pack build --target web --out-dir web/pkg

    printf '%s\n' "" "WASM build complete!" "Output: crates/db-test/web/pkg/" "" "To test, run: just serve-wasm-db-test"
1351  
# Serve the WASM test application
serve-wasm-db-test:
    #!/usr/bin/env bash
    set -euo pipefail

    printf '%s\n' "Starting web server for Datafrog WASM test..." "=============================================" ""

    # Guard clause: the bundle must exist before serving.
    if [ ! -d "crates/db-test/web/pkg" ]; then
        printf '%s\n' "Error: WASM module not built yet" "Run: just build-wasm-db-test"
        exit 1
    fi

    printf '%s\n' "Server running at: http://localhost:8000" "Press Ctrl+C to stop" ""

    # Simple static server; runs in the foreground until interrupted.
    cd crates/db-test/web
    python3 -m http.server 8000
1373  
# Build and serve WASM test in one command
test-wasm-db: build-wasm-db-test serve-wasm-db-test

# ----------------------------------------------------------------------------
# Scenario runner recipes — thin wrappers around `aura scenarios …` subcommands
# ----------------------------------------------------------------------------

# Run scenarios with default settings
run-scenarios:
    cargo run --bin aura -- scenarios run --directory scenarios

# Run scenarios with specific pattern
run-scenarios-pattern pattern:
    cargo run --bin aura -- scenarios run --pattern {{pattern}} --directory scenarios

# Run scenarios in parallel
run-scenarios-parallel:
    cargo run --bin aura -- scenarios run --parallel --max-parallel 4 --directory scenarios

# List all available scenarios
list-scenarios:
    cargo run --bin aura -- scenarios list --directory scenarios --detailed

# Validate all scenarios
validate-scenarios:
    cargo run --bin aura -- scenarios validate --directory scenarios --strictness standard

# Discover scenarios in directory tree
discover-scenarios:
    cargo run --bin aura -- scenarios discover --root . --validate

# Generate HTML report from scenario execution results
generate-report input output:
    cargo run --bin aura -- scenarios report --input {{input}} --output {{output}} --format html --detailed
1404  
# Run full scenario test suite (discovery, validation, execution, reporting)
test-scenarios:
    #!/usr/bin/env bash
    set -euo pipefail

    printf '%s\n' "Running Full Scenario Test Suite" "================================" ""

    # Each stage delegates to an existing recipe or the aura CLI; set -e
    # aborts the suite on the first failing stage.
    printf '%s\n' "1. Discovering scenarios..."
    just discover-scenarios
    printf '\n'

    printf '%s\n' "2. Validating scenarios..."
    just validate-scenarios
    printf '\n'

    printf '%s\n' "3. Running scenarios..."
    cargo run --bin aura -- scenarios run --directory scenarios --output-file outcomes/scenario_results.json --detailed-report
    printf '\n'

    printf '%s\n' "4. Generating report..."
    just generate-report outcomes/scenario_results.json outcomes/scenario_report.html
    printf '\n'

    printf '%s\n' "Scenario test suite completed!" "Report available at: outcomes/scenario_report.html"
1432  
# Demonstrate the complete Quint to JSON to simulator pipeline
test-quint-pipeline:
    #!/usr/bin/env bash
    set -euo pipefail

    echo "Testing Quint Specification to Simulator Pipeline"
    echo "=================================================="
    echo ""

    # Clean up any previous test artifacts
    rm -f /tmp/quint_pipeline_test.json
    mkdir -p .aura-test

    echo "1. Converting Quint specification to JSON..."
    echo "   Input: verification/quint/protocol_dkd.qnt"
    echo "   Output: /tmp/quint_pipeline_test.json"
    echo ""
    just quint-parse verification/quint/protocol_dkd.qnt /tmp/quint_pipeline_test.json
    echo ""

    # Validate the emitted JSON: existence, size, and key structural fields.
    echo "2. Verifying JSON output structure..."
    if [ -f "/tmp/quint_pipeline_test.json" ]; then
        echo "   [OK] JSON file created successfully"
        # BSD stat (-f%z) first, GNU stat (-c%s) as fallback, for portability.
        echo "   Size: $(stat -f%z /tmp/quint_pipeline_test.json 2>/dev/null || stat -c%s /tmp/quint_pipeline_test.json) bytes"

        # Extract key information from the JSON
        echo "   Module name: $(jq -r '.modules[0].name' /tmp/quint_pipeline_test.json)"
        echo "   Declarations: $(jq '.modules[0].declarations | length' /tmp/quint_pipeline_test.json)"
        echo "   Variables: $(jq '.modules[0].declarations | map(select(.kind == "var")) | length' /tmp/quint_pipeline_test.json)"
        echo "   Actions: $(jq '.modules[0].declarations | map(select(.qualifier == "action")) | length' /tmp/quint_pipeline_test.json)"
        echo "   Properties: $(jq '.modules[0].declarations | map(select(.qualifier == "val")) | length' /tmp/quint_pipeline_test.json)"
    else
        echo "   [ERROR] JSON file not created"
        exit 1
    fi
    echo ""

    echo "3. Testing JSON can be consumed by simulator..."
    echo "   The JSON output contains all necessary information for the simulator:"
    echo "   - State variables: sessionCount, completedSessions, failedSessions"
    echo "   - Actions: startSession, completeSession, failSession, init, step"
    echo "   - Properties: validCounts, sessionLimit, safetyProperty, progressProperty"
    echo ""

    # Some simulator tests read /tmp/dkd_spec.json; create it if absent.
    echo "4. Verifying existing integration..."
    if [ -f "/tmp/dkd_spec.json" ]; then
        echo "   [OK] Existing DKD spec JSON found and ready for simulator"
        echo "   Size: $(stat -f%z /tmp/dkd_spec.json 2>/dev/null || stat -c%s /tmp/dkd_spec.json) bytes"
        echo "   Created: $(stat -f%Sm /tmp/dkd_spec.json 2>/dev/null || stat -c%y /tmp/dkd_spec.json)"
    else
        echo "   [INFO] No existing DKD spec JSON found - creating one"
        just quint-parse verification/quint/protocol_dkd.qnt /tmp/dkd_spec.json
    fi
    echo ""

    echo "5. Pipeline verification complete!"
    echo "   Quint to JSON conversion: Working"
    echo "   JSON structure validation: Working"
    echo "   Simulator integration points: Ready"
    echo ""
    echo "Available commands for the pipeline:"
    echo "   just quint-parse <input.qnt> <output.json>  - Convert any Quint spec to JSON"
    echo "   just quint-compile <input.qnt> <output.json> - Full compile with type checking"
    echo "   just test-quint-parse                       - Test with a simple example"
    echo ""
    echo "The converted JSON can be consumed by the simulator tests that expect"
    echo "Quint specification files at /tmp/dkd_spec.json"
1500  
1501  # ============================================================================
1502  # Kani Bounded Model Checking
1503  # ============================================================================
1504  
1505  # Run Kani verification on consensus module (requires nightly shell)
1506  # First time setup: just kani-setup
1507  kani package="aura-protocol" unwind="10":
1508      #!/usr/bin/env bash
1509      set -euo pipefail
1510      echo "Running Kani bounded model checking on {{package}}..."
1511      echo "Unwind bound: {{unwind}}"
1512      echo ""
1513      # Use nix develop .#nightly to get the correct environment
1514      nix develop .#nightly --command cargo kani --package {{package}} --default-unwind {{unwind}}
1515  
1516  # Run a specific Kani harness
1517  kani-harness harness package="aura-protocol" unwind="10":
1518      #!/usr/bin/env bash
1519      set -euo pipefail
1520      echo "Running Kani harness: {{harness}}"
1521      nix develop .#nightly --command cargo kani --package {{package}} --harness {{harness}} --default-unwind {{unwind}}
1522  
1523  # Setup Kani (first time only)
1524  kani-setup:
1525      #!/usr/bin/env bash
1526      set -euo pipefail
1527      echo "Setting up Kani verifier..."
1528      echo ""
1529      echo "Step 1: Installing kani-verifier..."
1530      nix develop .#nightly --command cargo install --locked kani-verifier
1531      echo ""
1532      echo "Step 2: Running Kani setup (downloads CBMC and toolchain)..."
1533      nix develop .#nightly --command cargo kani setup
1534      echo ""
1535      echo "✓ Kani setup complete!"
1536      echo ""
1537      echo "Run 'just kani' to verify the consensus module."
1538  
1539  # Run full Kani verification suite with summary output
1540  # Logs detailed output to logs/kani/, surfaces only pass/fail
1541  kani-suite:
1542      @./scripts/run-kani-suite.sh
1543  
1544  # ============================================================================
1545  # Lean Verification Tasks
1546  # ============================================================================
1547  
1548  # Initialize Lean project (run once or after clean)
1549  lean-init:
1550      @echo "Initializing Lean project..."
1551      cd verification/lean && lake update
1552  
1553  # Build Lean verification modules
1554  # Usage: just lean-build [jobs]
1555  #   jobs: number of parallel threads (default: 2 for safe resource usage)
1556  lean-build jobs="2": lean-init
1557      @echo "Building Lean verification modules (threads={{jobs}})..."
1558      cd verification/lean && nice -n 15 lake build -K env.LEAN_THREADS={{jobs}}
1559  
# Build the Lean oracle verifier CLI for differential testing
lean-oracle-build: lean-init
    #!/usr/bin/env bash
    set -euo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    # Remember the repo root before cd'ing into the Lean project.
    repo_root="$(pwd)"

    echo "Building Lean oracle verifier..."
    cd verification/lean && lake build aura_verifier

    # Guard clause: bail out if lake did not produce the binary.
    oracle_bin="$repo_root/verification/lean/.lake/build/bin/aura_verifier"
    if [ ! -f "$oracle_bin" ]; then
        echo -e "${YELLOW}⚠ Binary not found at expected location${NC}"
        echo "  Expected: $oracle_bin"
        exit 1
    fi

    echo -e "${GREEN}✓ Lean oracle built successfully${NC}"
    echo "  Binary: $oracle_bin"
    # Best-effort version probe; falls back to "unknown" if the output
    # does not contain a "version" field.
    oracle_version=$("$oracle_bin" version 2>/dev/null | grep -o '"version":"[^"]*"' | cut -d'"' -f4 || echo "unknown")
    echo "  Version: $oracle_version"
1586  
# (`-- --ignored` opts in tests that are skipped by default; --nocapture
# streams their output. Depends on lean-oracle-build so the oracle binary
# exists before the tests run.)
# Run differential tests against Lean oracle
test-differential: lean-oracle-build
    @echo "Running differential tests against Lean oracle..."
    cargo test -p aura-testkit --features lean --test lean_differential -- --ignored --nocapture
1591  
# Run Lean verification (build and check for errors)
# Usage: just lean-check [jobs]
lean-check jobs="4": (lean-build jobs)
    #!/usr/bin/env bash
    set -uo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    echo "Checking Lean proof status..."
    echo ""

    # Use an unpredictable temp file instead of a fixed /tmp path (avoids
    # clobbering/symlink races between concurrent runs) and remove it on
    # every exit path.
    sorry_list=$(mktemp)
    trap 'rm -f -- "$sorry_list"' EXIT

    # Check for sorry usage (incomplete proofs)
    if grep -r "sorry" verification/lean/Aura --include="*.lean" > "$sorry_list" 2>/dev/null; then
        count=$(wc -l < "$sorry_list" | tr -d ' ')
        echo -e "${YELLOW}⚠ Found $count incomplete proofs (sorry):${NC}"
        head -10 "$sorry_list" | sed 's/^/  /'
        if [ "$count" -gt 10 ]; then
            echo "  ... and $((count - 10)) more"
        fi
    else
        echo -e "${GREEN}✓ All proofs complete (no sorry found)${NC}"
    fi
1616  
# Clean Lean build artifacts
# Removes lake's build outputs; run lean-init/lean-build again afterwards.
lean-clean:
    @echo "Cleaning Lean artifacts..."
    cd verification/lean && lake clean
1621  
# Full Lean workflow (clean, build, verify)
# Chains lean-clean -> lean-build (with its default jobs) -> lean-check.
lean-full: lean-clean lean-build lean-check
    @echo "Lean verification complete!"
1625  
# Show Lean proof status summary
lean-status:
    #!/usr/bin/env bash
    set -uo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    echo "Lean Proof Status"
    echo "================="
    echo ""

    LEAN_DIR="verification/lean"

    # An absent Lean tree is not an error for a status command.
    if [ ! -d "$LEAN_DIR" ]; then
        echo "No Lean directory found at $LEAN_DIR"
        exit 0
    fi

    echo "Modules:"
    # One line per module: a filled green dot for fully proved files, a
    # hollow yellow circle plus a count for files still containing `sorry`.
    find "$LEAN_DIR/Aura" -name "*.lean" -type f | sort | while read -r module; do
        stem=$(basename "$module" .lean)
        parent=$(dirname "$module" | sed "s|$LEAN_DIR/Aura/||")
        display="$stem"
        # Prefix the subdirectory only for files nested below Aura/.
        if [ -n "$parent" ] && [ "$parent" != "$LEAN_DIR/Aura" ]; then
            display="$parent/$stem"
        fi
        # grep -c still prints 0 (while exiting non-zero) when no match.
        incomplete=$(grep -c "sorry" "$module" 2>/dev/null || true)
        incomplete=${incomplete:-0}
        if [ "$incomplete" -gt 0 ] 2>/dev/null; then
            echo -e "  ${YELLOW}○${NC} $display ($incomplete incomplete)"
        else
            echo -e "  ${GREEN}●${NC} $display"
        fi
    done

    echo ""
    echo "Run 'just lean-check' to build and verify proofs"
1666  
# Translate pure Rust functions to Lean using Charon + Aeneas
# Workflow: Rust → Charon → LLBC → Aeneas → Lean
# Usage: just lean-translate [jobs] [crate]
#   jobs: number of parallel jobs (default: 1 for safe resource usage)
#   crate: specific crate to translate (default: all)
# Example: just lean-translate 2 aura-core
#
# Resource management:
#   - Uses nice -n 19 for lowest CPU priority
#   - Limits cargo parallelism with -j flag
#   - Codegen units set to 1 to reduce memory per rustc process
#   - On macOS, monitor with: watch -n1 'ps aux | grep -E "charon|aeneas|rustc" | head -5'
lean-translate jobs="1" crate="all":
    #!/usr/bin/env bash
    set -uo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    RED='\033[0;31m'
    NC='\033[0m'

    JOBS="{{jobs}}"
    TARGET_CRATE="{{crate}}"

    # Per-run scratch directory for tool error logs: fixed /tmp names are
    # race-prone, collide across concurrent runs, and are never cleaned up.
    # The trap removes the directory on every exit path.
    LOG_DIR="$(mktemp -d)"
    trap 'rm -rf -- "$LOG_DIR"' EXIT

    # Set environment variables for resource limiting
    export CARGO_BUILD_JOBS="$JOBS"
    export RUSTFLAGS="${RUSTFLAGS:-} -C codegen-units=1"  # Reduce memory per rustc

    echo "Translating Rust to Lean using Charon + Aeneas"
    echo "==============================================="
    echo "Resource settings:"
    echo "  - Parallel jobs: $JOBS"
    echo "  - CPU priority: nice -n 19 (lowest)"
    echo "  - Codegen units: 1 (reduces memory)"
    echo ""
    echo "Tip: To monitor resource usage, run in another terminal:"
    echo "  watch -n1 'ps aux | grep -E \"charon|aeneas|rustc\" | head -5'"
    echo ""

    OUTPUT_DIR="verification/lean/Generated"
    LLBC_DIR="target/llbc"
    mkdir -p "$OUTPUT_DIR" "$LLBC_DIR"

    # Check if charon and aeneas are available (they come from nix develop)
    if ! command -v charon &> /dev/null; then
        echo -e "${RED}✗ Charon not found in PATH${NC}"
        echo "  Run 'nix develop' to enter the development environment"
        exit 1
    fi

    if ! command -v aeneas &> /dev/null; then
        echo -e "${RED}✗ Aeneas not found in PATH${NC}"
        echo "  Run 'nix develop' to enter the development environment"
        exit 1
    fi

    echo "Charon: $(charon version 2>/dev/null || echo 'available')"
    echo "Aeneas: available (use -help for options)"
    echo ""

    # Crates to translate (containing pure/ modules)
    if [ "$TARGET_CRATE" = "all" ]; then
        CRATES=(
            "aura-core"
            "aura-journal"
        )
    else
        CRATES=("$TARGET_CRATE")
    fi

    SUCCESS=0
    FAILED=0

    for crate in "${CRATES[@]}"; do
        echo "=== Translating $crate ==="
        echo ""

        # Step 1: Compile with Charon to LLBC
        # Use nice for low priority, -j for limited parallelism
        echo -n "  [1/2] Compiling to LLBC with Charon (jobs=$JOBS)... "
        # Charon names its output after the crate with dashes mapped to
        # underscores — NOTE(review): assumed naming, confirm against charon.
        llbc_file="$LLBC_DIR/${crate//-/_}.llbc"

        if nice -n 19 charon cargo --dest "$LLBC_DIR" -- -p "$crate" -j "$JOBS" 2>"$LOG_DIR/charon_err.log"; then
            echo -e "${GREEN}✓${NC}"
        else
            echo -e "${RED}✗${NC}"
            echo "    Error: $(head -5 "$LOG_DIR/charon_err.log")"
            ((FAILED++))
            continue
        fi

        # Step 2: Translate with Aeneas to Lean
        echo -n "  [2/2] Translating to Lean with Aeneas... "
        crate_out="$OUTPUT_DIR/${crate//-/_}"
        mkdir -p "$crate_out"

        if nice -n 19 aeneas -backend lean "$llbc_file" -dest "$crate_out" 2>"$LOG_DIR/aeneas_err.log"; then
            echo -e "${GREEN}✓${NC}"
            ((SUCCESS++))
        else
            echo -e "${YELLOW}⚠${NC} (check output)"
            echo "    Note: $(head -3 "$LOG_DIR/aeneas_err.log")"
            # Count as partial success if files were generated
            if [ -n "$(find "$crate_out" -name '*.lean' 2>/dev/null)" ]; then
                ((SUCCESS++))
            else
                ((FAILED++))
            fi
        fi
        echo ""
    done

    echo "Summary: $SUCCESS crates translated, $FAILED failed"
    echo ""
    echo "Generated Lean files: $OUTPUT_DIR/"
    if [ -d "$OUTPUT_DIR" ]; then
        find "$OUTPUT_DIR" -name "*.lean" -type f | head -10
        count=$(find "$OUTPUT_DIR" -name "*.lean" -type f | wc -l | tr -d ' ')
        if [ "$count" -gt 10 ]; then
            echo "  ... and $((count - 10)) more"
        fi
    fi
1789  
# Verify translated Lean code compiles
# Usage: just lean-verify-translated [jobs] [crate]
lean-verify-translated jobs="2" crate="all": (lean-translate jobs crate) lean-init
    #!/usr/bin/env bash
    set -uo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    NC='\033[0m'

    echo "Verifying translated Lean code..."

    GEN_DIR="verification/lean/Generated"

    # Nothing translated yet is not a failure — report and exit quietly.
    if [ ! -d "$GEN_DIR" ]; then
        echo -e "${YELLOW}⚠ No generated code found in $GEN_DIR${NC}"
        exit 0
    fi

    # Count generated files before spending time on a lake build.
    gen_count=$(find "$GEN_DIR" -name "*.lean" -type f | wc -l | tr -d ' ')
    if [ "$gen_count" -eq 0 ]; then
        echo -e "${YELLOW}⚠ No .lean files found in $GEN_DIR${NC}"
        exit 0
    fi

    echo "Found $gen_count generated Lean files"
    echo ""

    # Attempt the build; compiler noise is suppressed here, and the user
    # is pointed at the manual command for the full diagnostics.
    cd verification/lean
    if ! lake build Generated 2>/dev/null; then
        echo -e "${YELLOW}⚠ Some translated code has errors${NC}"
        echo "  Run 'cd verification/lean && lake build Generated' for details"
    else
        echo -e "${GREEN}✓ All translated code compiles${NC}"
    fi
1826  
1827  # =============================================================================
1828  # On-Demand Verification Commands (matches conditional CI jobs)
1829  # =============================================================================
1830  # These commands mirror the verification jobs in GitHub CI that only run
1831  # on main push, specific labels, or when verification files change.
1832  # Use these when working on verification code or before major releases.
1833  
# Lean formal verification (matches CI lean-proofs job)
verify-lean:
    #!/usr/bin/env bash
    set -euo pipefail

    GREEN='\033[0;32m'
    YELLOW='\033[1;33m'
    RED='\033[0;31m'
    NC='\033[0m'

    echo "Lean Formal Verification"
    echo "========================"
    echo ""

    echo "[1/2] Building Lean proofs..."
    cd verification/lean
    # Guard clause: abort immediately when the proofs fail to build.
    if ! lake build; then
        echo -e "${RED}[FAIL]${NC} Lean proofs failed to build"
        exit 1
    fi
    echo -e "${GREEN}[OK]${NC} Lean proofs build successfully"
    echo ""

    echo "[2/2] Checking for incomplete proofs (sorry usage)..."
    cd ../..
    # grep prints any offending lines; its exit status picks the verdict.
    if grep -r "sorry" verification/lean/Aura --include="*.lean" 2>/dev/null; then
        echo -e "${YELLOW}[WARNING]${NC} Found incomplete proofs (sorry). These should be resolved."
    else
        echo -e "${GREEN}[OK]${NC} All proofs complete (no sorry found)"
    fi
    echo ""

    echo -e "${GREEN}Lean verification complete${NC}"
1868  
# Quint model checking with invariant verification (matches CI quint-model-checking job)
quint-verify-models:
    #!/usr/bin/env bash
    set -euo pipefail

    GREEN='\033[0;32m'
    RED='\033[0;31m'
    NC='\033[0m'

    # Run one invariant-verification pass; aborts the recipe on failure.
    check_invariants() {
        local invariant="$1" spec="$2" samples="$3"
        echo "  Verifying $spec..."
        if ! quint verify --invariant="$invariant" "$spec" --max-samples="$samples"; then
            echo -e "${RED}[FAIL]${NC} $spec invariants failed"
            exit 1
        fi
    }

    echo "Quint Model Checking"
    echo "===================="
    echo ""

    cd verification/quint

    echo "[1/2] Typechecking all Quint specifications..."
    for spec in *.qnt; do
        echo "  Checking $spec..."
        if ! quint typecheck "$spec"; then
            echo -e "${RED}[FAIL]${NC} Typecheck failed for $spec"
            exit 1
        fi
    done
    echo -e "${GREEN}[OK]${NC} All Quint specs typecheck"
    echo ""

    echo "[2/2] Running Quint invariant verification..."

    # Heavier consensus spec gets more samples; the adversary and liveness
    # specs each check one targeted invariant with a smaller budget.
    check_invariants AllInvariants protocol_consensus.qnt 1000
    check_invariants InvariantByzantineThreshold protocol_consensus_adversary.qnt 500
    check_invariants InvariantProgressUnderSynchrony protocol_consensus_liveness.qnt 500

    echo -e "${GREEN}[OK]${NC} All Quint invariants pass model checking"
    echo ""
    echo -e "${GREEN}Quint verification complete${NC}"
1918  
# Consensus conformance tests (matches CI consensus-conformance job)
verify-conformance:
    #!/usr/bin/env bash
    set -euo pipefail

    GREEN='\033[0;32m'
    RED='\033[0;31m'
    NC='\033[0m'

    # Announce a step, run the command, report [OK]/[FAIL]; exits on failure.
    run_step() {
        local banner="$1" fail_msg="$2" ok_msg="$3"
        shift 3
        echo "$banner"
        if ! "$@"; then
            echo -e "${RED}[FAIL]${NC} $fail_msg"
            exit 1
        fi
        echo -e "${GREEN}[OK]${NC} $ok_msg"
        echo ""
    }

    echo "Consensus Conformance Tests"
    echo "==========================="
    echo ""

    # The trace generator needs its output directory to exist.
    mkdir -p traces

    run_step "[1/3] Generating fresh ITF traces from Quint spec..." \
        "Failed to generate ITF traces" \
        "Generated fresh ITF traces" \
        quint run --out-itf=traces/consensus.itf.json verification/quint/protocol_consensus.qnt --max-steps=30 --max-samples=5

    run_step "[2/3] Running ITF conformance tests..." \
        "ITF conformance tests failed" \
        "ITF conformance tests passed" \
        cargo test -p aura-protocol --test consensus_itf_conformance -- --nocapture

    run_step "[3/3] Running differential tests (production vs reference)..." \
        "Differential tests failed" \
        "Differential tests passed" \
        cargo test -p aura-protocol --test consensus_differential -- --nocapture

    echo -e "${GREEN}Consensus conformance complete${NC}"
1958  
# Run all verification (Lean + Quint + Conformance)
verify-all: verify-lean quint-verify-models verify-conformance
    #!/usr/bin/env bash
    # All checks already ran via the recipe dependencies above; this body
    # only prints the closing banner.
    printf '\n========================================\nALL VERIFICATION COMPLETE\n========================================\n'