---
# test-extended.yml
#
# Nightly extended-test workflow for the praisonai packages:
#   * test-examples    — runs key example scripts from praisonai-agents
#   * performance-test — runs lightweight creation/import/memory benchmarks
# Triggered on a daily schedule and manually via workflow_dispatch.
name: Extended Tests

on:
  schedule:
    - cron: '0 2 * * *'  # daily at 02:00 UTC
  workflow_dispatch:

jobs:
  test-examples:
    runs-on: ubuntu-latest
    timeout-minutes: 15

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # fixed: the original step declared `with:` twice (duplicate
          # mapping key — invalid YAML); collapsed to a single block
          persist-credentials: false

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # quoted so the version is parsed as a string, never a float
          python-version: "3.11"

      - name: Install UV
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          cd src/praisonai
          uv pip install --system ."[ui,gradio,api,agentops,google,openai,anthropic,cohere,chat,code,realtime,call,crewai,autogen]"
          uv pip install --system duckduckgo_search
          # Install knowledge dependencies from praisonai-agents
          uv pip install --system "praisonaiagents[knowledge]"

      - name: Set environment variables
        run: |
          echo "OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY || 'sk-test-key-for-github-actions-testing-only-not-real' }}" >> $GITHUB_ENV
          echo "OPENAI_API_BASE=${{ secrets.OPENAI_API_BASE || 'https://api.openai.com/v1' }}" >> $GITHUB_ENV
          echo "OPENAI_MODEL_NAME=${{ secrets.OPENAI_MODEL_NAME || 'gpt-5-nano' }}" >> $GITHUB_ENV
          echo "PYTHONPATH=${{ github.workspace }}/src/praisonai-agents:$PYTHONPATH" >> $GITHUB_ENV
          echo "PRAISONAI_TELEMETRY_DISABLED=true" >> $GITHUB_ENV
          echo "PRAISONAI_DISABLE_TELEMETRY=true" >> $GITHUB_ENV
          echo "DO_NOT_TRACK=true" >> $GITHUB_ENV

      - name: Test Key Example Scripts
        run: |
          echo "🧪 Testing key example scripts from praisonai-agents..."

          # Create a timeout function for consistent handling.
          # NOTE(review): $1 here is the command name ("python"), not the
          # script path — confirm whether $2 was intended for the message.
          timeout_run() {
            timeout 30s "$@" || echo "⏱️ $1 test completed/timed out"
          }

          # Test basic agent functionality
          timeout_run python ${{ github.workspace }}/src/praisonai-agents/basic-agents.py

          # Test async functionality
          timeout_run python ${{ github.workspace }}/src/praisonai-agents/async_example.py

          # Test knowledge/RAG functionality
          timeout_run python ${{ github.workspace }}/src/praisonai-agents/knowledge-agents.py

          # Test MCP functionality
          timeout_run python ${{ github.workspace }}/src/praisonai-agents/mcp-basic.py

          # Test UI functionality
          timeout_run python ${{ github.workspace }}/src/praisonai-agents/ui.py

          echo "✅ Example script testing completed"
        continue-on-error: true

  performance-test:
    runs-on: ubuntu-latest
    timeout-minutes: 15

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          # fixed: duplicate `with:` key removed (same defect as above)
          persist-credentials: false

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      - name: Install UV
        run: |
          curl -LsSf https://astral.sh/uv/install.sh | sh
          echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Install dependencies
        run: |
          cd src/praisonai
          uv pip install --system ."[ui,gradio,api,agentops,google,openai,anthropic,cohere,chat,code,realtime,call,crewai,autogen]"
          uv pip install --system pytest pytest-benchmark
          # Install knowledge dependencies from praisonai-agents
          uv pip install --system "praisonaiagents[knowledge]"

      - name: Set environment variables
        run: |
          echo "PRAISONAI_TELEMETRY_DISABLED=true" >> $GITHUB_ENV
          echo "PRAISONAI_DISABLE_TELEMETRY=true" >> $GITHUB_ENV
          echo "DO_NOT_TRACK=true" >> $GITHUB_ENV

      - name: Run Performance Benchmarks
        run: |
          echo "🚀 Running performance benchmarks..."
          python -c "
          import time
          import sys
          import statistics
          sys.path.insert(0, 'src/praisonai')

          print('📊 Testing agent creation performance...')
          times = []
          try:
              from praisonaiagents import Agent
              for i in range(5):
                  start_time = time.time()
                  agent = Agent(name=f'PerfAgent{i}')
                  times.append(time.time() - start_time)

              avg_time = statistics.mean(times)
              print(f'✅ Average agent creation time: {avg_time:.3f}s')
              print(f'📈 Min: {min(times):.3f}s, Max: {max(times):.3f}s')
          except Exception as e:
              print(f'❌ Agent creation benchmark failed: {e}')

          print('📊 Testing import performance...')
          start_time = time.time()
          try:
              import praisonaiagents
              import_time = time.time() - start_time
              print(f'✅ Import completed in {import_time:.3f}s')
          except Exception as e:
              print(f'❌ Import benchmark failed: {e}')

          print('📊 Testing memory usage...')
          try:
              import psutil
              import os
              process = psutil.Process(os.getpid())
              memory_mb = process.memory_info().rss / 1024 / 1024
              print(f'📊 Memory usage: {memory_mb:.1f} MB')
          except ImportError:
              print('⚠️ psutil not available for memory testing')
          except Exception as e:
              print(f'❌ Memory benchmark failed: {e}')
          "
        continue-on-error: true

      - name: Generate Performance Report
        run: |
          echo "## 📊 Performance Test Results" > performance_report.md
          echo "" >> performance_report.md
          echo "### Benchmarks Run:" >> performance_report.md
          echo "- ⚡ Agent creation speed" >> performance_report.md
          echo "- 📦 Import performance" >> performance_report.md
          echo "- 💾 Memory usage" >> performance_report.md
          echo "- 🧪 Example script execution" >> performance_report.md
          echo "" >> performance_report.md
          echo "_Performance results are logged in the CI output above._" >> performance_report.md

      - name: Upload Performance Report
        uses: actions/upload-artifact@v4
        with:
          name: performance-report
          path: performance_report.md
          retention-days: 5