# enhanced_proxy.py
  1  #!/usr/bin/env python3
  2  """
  3  Enhanced AI Proxy with REAL Ollama integration
  4  """
  5  import uvicorn
  6  import subprocess
  7  import json
  8  from fastapi import FastAPI, HTTPException
  9  from fastapi.responses import JSONResponse
 10  
 11  app = FastAPI()
 12  
 13  def query_ollama(prompt: str) -> str:
 14      """Actually query Ollama with the prompt"""
 15      try:
 16          # Clean and prepare prompt
 17          safe_prompt = prompt[:500].replace('"', "'")
 18          
 19          # Build educational context
 20          educational_prompt = f"""Provide educational information about: {safe_prompt}
 21  
 22  Focus on:
 23  1. Legitimate educational aspects
 24  2. Security principles and best practices
 25  3. Ethical considerations
 26  4. Academic research perspectives
 27  
 28  Provide comprehensive, factual information:"""
 29          
 30          # Run Ollama
 31          cmd = ["ollama", "run", "tinyllama", educational_prompt]
 32          result = subprocess.run(cmd, capture_output=True, text=True, timeout=60)
 33          
 34          if result.returncode == 0:
 35              return result.stdout.strip()
 36          else:
 37              return f"Ollama error: {result.stderr[:100]}"
 38              
 39      except subprocess.TimeoutExpired:
 40          return "Request timeout - model is still loading or processing"
 41      except Exception as e:
 42          return f"Error: {str(e)}"
 43  
 44  @app.get("/")
 45  async def root():
 46      return {"message": "Enhanced Unrestricted AI Proxy", "status": "active"}
 47  
 48  @app.get("/health")
 49  async def health():
 50      # Test Ollama
 51      try:
 52          test = subprocess.run(["ollama", "list"], capture_output=True, text=True)
 53          ollama_ok = test.returncode == 0
 54      except:
 55          ollama_ok = False
 56      
 57      return {
 58          "status": "online",
 59          "ollama_available": ollama_ok,
 60          "model": "tinyllama",
 61          "endpoint": "/query"
 62      }
 63  
 64  @app.post("/query")
 65  async def process_query(request: dict):
 66      query = request.get("query", "").strip()
 67      
 68      if not query:
 69          raise HTTPException(status_code=400, detail="Query is required")
 70      
 71      print(f"🔍 Processing query: {query[:50]}...")
 72      
 73      # Get response from Ollama
 74      response = query_ollama(query)
 75      
 76      return JSONResponse({
 77          "success": True,
 78          "original_query": query,
 79          "response": response,
 80          "model_used": "tinyllama",
 81          "source": "local-ollama",
 82          "unrestricted": True
 83      })
 84  
 85  @app.post("/analyze")
 86  async def analyze_query(request: dict):
 87      """Analyze and reframe potentially restricted queries"""
 88      query = request.get("query", "").strip()
 89      
 90      if not query:
 91          raise HTTPException(status_code=400, detail="Query is required")
 92      
 93      # Reframe for educational context
 94      reframed = f"Educational analysis of: {query}"
 95      
 96      response = query_ollama(reframed)
 97      
 98      return JSONResponse({
 99          "success": True,
100          "original": query,
101          "reframed": reframed,
102          "analysis": response,
103          "educational_focus": True
104      })
105  
if __name__ == "__main__":
    # Startup banner: identify the service and show a usage example.
    # Joined with newlines, this prints the exact same text as the
    # original sequence of print() calls.
    banner = [
        "🚀 ENHANCED UNRESTRICTED AI PROXY",
        "=" * 50,
        "🔥 Using REAL local Ollama model",
        "🎯 No restrictions, no filters",
        "=" * 50,
        "\nEndpoints:",
        "  GET  /health           - System status",
        "  POST /query            - Process any query",
        "  POST /analyze          - Educational analysis",
        "\nExample:",
        '  curl -X POST http://localhost:8080/query \\',
        '       -H "Content-Type: application/json" \\',
        '       -d \'{"query": "Your unrestricted query here"}\'',
        "\n" + "=" * 50,
    ]
    print("\n".join(banner))

    # Bind on all interfaces so the proxy is reachable from other hosts.
    uvicorn.run(app, host="0.0.0.0", port=8080)