# tooling/shadow-runner/filter.py
  1  #!/usr/bin/env python3
  2  """
  3  Shadow Runner Filter - Decision logic for when to use async execution.
  4  
  5  Classifies repos by test duration and provides should_use_shadow() to
  6  determine when background execution is appropriate.
  7  """
  8  
from typing import Dict, Optional, Tuple
 10  
 11  # Repo classification by test duration
 12  REPO_SPEED: Dict[str, list] = {
 13      "fast": [
 14          "acdc-core", "ac-dc", "sdk", "alpha-delta-context", "adl-examples",
 15          "acdc-wallet", "acdc-governor", "acdc-messenger", "acdc-scanner",
 16          "acdc-docs", "acdc-design", "acdc-i18n", "ci-dashboard", "acdc-home",
 17      ],
 18      "medium": [
 19          "adnet", "adl", "wallet-core", "acdc-cli", "acdc-contracts", "acdc-forge",
 20      ],
 21      "slow": [
 22          "alphavm", "deltavm", "alphaos", "deltaos",
 23      ],
 24  }
 25  
 26  # Flatten for quick lookup
 27  FAST_REPOS = set(REPO_SPEED["fast"])
 28  MEDIUM_REPOS = set(REPO_SPEED["medium"])
 29  SLOW_REPOS = set(REPO_SPEED["slow"])
 30  ALL_REPOS = FAST_REPOS | MEDIUM_REPOS | SLOW_REPOS
 31  
 32  
 33  def get_speed(repo: str) -> str:
 34      """Get speed classification for a repo."""
 35      if repo in FAST_REPOS:
 36          return "fast"
 37      if repo in MEDIUM_REPOS:
 38          return "medium"
 39      if repo in SLOW_REPOS:
 40          return "slow"
 41      return "unknown"
 42  
 43  
 44  def should_use_shadow(repo: str, context: Dict = None) -> Tuple[bool, str]:
 45      """
 46      Determine whether to use shadow runner for a given repo and context.
 47  
 48      Returns:
 49          (should_use: bool, reason: str)
 50      """
 51      context = context or {}
 52  
 53      # Fast repos - never use shadow (tests < 10min)
 54      if repo in FAST_REPOS:
 55          return False, f"Fast repo ({repo}) - tests complete in <10min"
 56  
 57      # Interactive debugging - user needs immediate feedback
 58      if context.get("interactive"):
 59          return False, "Interactive session - use sync for immediate feedback"
 60  
 61      # Single test file - targeted tests are fast
 62      if context.get("single_test"):
 63          return False, "Single test specified - use sync for targeted execution"
 64  
 65      # Small changes with unit tests only - quick iteration
 66      files_changed = context.get("files_changed", 0)
 67      if files_changed <= 2 and context.get("unit_tests_only"):
 68          return False, f"Small change ({files_changed} files) with unit tests - use sync"
 69  
 70      # Slow repos - always use shadow (60-90min tests)
 71      if repo in SLOW_REPOS:
 72          return True, f"Slow repo ({repo}) - tests take 60-90min, use async"
 73  
 74      # Medium repos with full test suite - use shadow (30-45min)
 75      if repo in MEDIUM_REPOS and context.get("full_suite", True):
 76          return True, f"Medium repo ({repo}) full suite - tests take 30-45min, use async"
 77  
 78      # Medium repos with partial tests - sync is fine
 79      if repo in MEDIUM_REPOS:
 80          return False, f"Medium repo ({repo}) partial tests - use sync"
 81  
 82      # Unknown repo - default to sync for predictability
 83      return False, f"Unknown repo ({repo}) - default to sync for predictability"
 84  
 85  
 86  def get_estimated_duration(repo: str) -> str:
 87      """Get estimated test duration for a repo."""
 88      speed = get_speed(repo)
 89      durations = {
 90          "fast": "<10 minutes",
 91          "medium": "30-45 minutes",
 92          "slow": "60-90 minutes",
 93          "unknown": "unknown",
 94      }
 95      return durations.get(speed, "unknown")
 96  
 97  
 98  if __name__ == "__main__":
 99      # Quick self-test
100      import json
101  
102      test_cases = [
103          ("alphavm", {}, True),
104          ("acdc-core", {}, False),
105          ("adnet", {"interactive": True}, False),
106          ("adnet", {}, True),
107          ("deltavm", {"single_test": True}, False),
108      ]
109  
110      print("Filter self-test:")
111      for repo, ctx, expected in test_cases:
112          result, reason = should_use_shadow(repo, ctx)
113          status = "PASS" if result == expected else "FAIL"
114          print(f"  [{status}] {repo}: {result} ({reason})")