examples/benchmark/benchmark_example.py
#!/usr/bin/env python3
"""
PraisonAI Benchmark Example

This example demonstrates how to use the benchmark system programmatically
to compare performance across different execution paths.

Usage:
    python benchmark_example.py
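    python benchmark_example.py --mode full
    python benchmark_example.py --mode agent --output results.json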

Requirements:
    - OPENAI_API_KEY environment variable set
    - praisonai package installed
"""

import os
import sys

# Ensure the package is importable
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../src/praisonai'))
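# Note: the relative path above assumes this script lives at examples/benchmark/
# inside a repo checkout; with praisonai pip-installed, the import works anyway.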

from praisonai.cli.features.benchmark import BenchmarkHandler, BenchmarkReport


def run_quick_comparison():
    """Run a quick comparison of key execution paths."""
    print("=" * 60)
    print("Quick Benchmark Comparison")
    print("=" * 60)

    handler = BenchmarkHandler()

    # Run benchmark with 2 iterations on key paths
    report = handler.run_full_benchmark(
        prompt="What is 2+2?",
        iterations=2,
        paths=["openai_sdk", "praisonai_agent"],
        output="verbose"
    )

    # Print comparison table
    print("\n" + handler.create_comparison_table(report))

    return report


def run_full_benchmark():
    """Run the full benchmark suite across all paths."""
    print("=" * 60)
    print("Full Benchmark Suite")
    print("=" * 60)

    handler = BenchmarkHandler()

    # Run full benchmark
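    # (omitting `paths` runs every available execution path)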
    report = handler.run_full_benchmark(
        prompt="Hi",
        iterations=3,
        output="verbose"
    )

    # Print full report
    handler.print_report(report)

    return report


def run_agent_benchmark():
    """Benchmark the PraisonAI Agent specifically."""
    print("=" * 60)
    print("Agent Benchmark")
    print("=" * 60)

    handler = BenchmarkHandler()

    # Benchmark agent vs SDK
    report = handler.run_full_benchmark(
        prompt="Explain Python in one sentence",
        iterations=3,
        paths=["openai_sdk", "praisonai_agent"],
        output="verbose"
    )

    # Show timeline diagrams
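    # (report.results is assumed to map each path name to its result object)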
    for name, result in report.results.items():
        print(f"\n### {name}")
        print(handler.create_timeline_diagram(result))

    # Show variance analysis
    print(handler.create_variance_table(report))

    return report


def save_benchmark_results(report: BenchmarkReport, output_path: str):
    """Save benchmark results to a JSON file."""
    import json

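    # Assumes report.to_dict() returns plain, JSON-serializable data.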
    with open(output_path, 'w') as f:
        json.dump(report.to_dict(), f, indent=2)

    print(f"\nResults saved to: {output_path}")


def main():
    """Main entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="PraisonAI Benchmark Example")
    parser.add_argument(
        "--mode",
        choices=["quick", "full", "agent"],
        default="quick",
        help="Benchmark mode: quick (2 paths), full (all paths), agent (agent focus)"
    )
    parser.add_argument(
        "--output",
        type=str,
        default=None,
        help="Output JSON file path"
    )

    args = parser.parse_args()

    # Check for API key
    if not os.environ.get("OPENAI_API_KEY"):
        print("Error: OPENAI_API_KEY environment variable not set")
        sys.exit(1)

    # Run selected benchmark
    if args.mode == "quick":
        report = run_quick_comparison()
    elif args.mode == "full":
        report = run_full_benchmark()
    elif args.mode == "agent":
        report = run_agent_benchmark()
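    # argparse's `choices` restricts --mode to the three options above,
    # so exactly one branch runs and `report` is always bound here.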

    # Save results if requested
    if args.output:
        save_benchmark_results(report, args.output)

    print("\n✅ Benchmark complete!")


if __name__ == "__main__":
    main()