# free_ai_orchestrator.py
#!/usr/bin/env python3
"""
FREE AI ORCHESTRATOR - Local First, Cloud Fallback

Tries a local Ollama server first; if it is unreachable, falls back to
the Groq cloud API. Responses are saved to a timestamped text file.
"""

import os
import sys
import json
import time
import subprocess
from datetime import datetime

# Bootstrap: install `requests` on first run if it is missing.
# NOTE(review): auto-installing at import time is a side effect; a
# requirements file would be the cleaner long-term fix.
try:
    import requests
except ImportError:
    print("Installing requests...")
    subprocess.check_call([sys.executable, "-m", "pip", "install", "requests"])
    import requests


def check_local_models():
    """Return the model names served by a local Ollama instance.

    Returns:
        list[str]: Model names, or an empty list when the server is
        unreachable or replies with a non-200 status.
    """
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
    except requests.exceptions.RequestException:
        # Server not running / connection refused / timeout — narrow
        # exception instead of a bare `except:` that would also swallow
        # KeyboardInterrupt and SystemExit.
        return []
    if response.status_code != 200:
        return []
    data = response.json()
    return [model["name"] for model in data.get("models", [])]


def query_local(prompt, model="dolphin-mistral:latest"):
    """Query a local Ollama model.

    Args:
        prompt: The user prompt to send.
        model: Ollama model tag to run.

    Returns:
        The generated text, or None on any failure (best-effort: the
        caller falls back to the cloud backend).
    """
    try:
        payload = {
            "model": model,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": 0.7,
                "num_predict": 2000
            }
        }
        response = requests.post(
            "http://localhost:11434/api/generate",
            json=payload,
            timeout=60
        )
        if response.status_code == 200:
            return response.json().get("response", "")
    except Exception as e:
        print(f"Local error: {e}")
    return None


def query_groq(prompt):
    """Query the Groq chat-completions API (cloud fallback).

    The API key is read from the GROQ_API_KEY environment variable.
    SECURITY FIX: the previous version hardcoded a live API key in
    source; that key is exposed and should be revoked.

    Args:
        prompt: The user prompt to send.

    Returns:
        The reply text, or None on any failure (including a missing key).
    """
    try:
        api_key = os.environ.get("GROQ_API_KEY")
        if not api_key:
            print("Groq error: GROQ_API_KEY environment variable is not set")
            return None
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        payload = {
            "model": "mixtral-8x7b-32768",
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 2000,
            "temperature": 0.7
        }
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=payload,
            timeout=30
        )
        if response.status_code == 200:
            data = response.json()
            return data.get("choices", [{}])[0].get("message", {}).get("content", "")
    except Exception as e:
        print(f"Groq error: {e}")
    return None
def save_response(prompt, response, source):
    """Write a prompt/response pair to a timestamped text file.

    Args:
        prompt: The prompt that was sent.
        response: The model's reply text.
        source: Which backend produced the reply ("local" or "groq").

    Returns:
        The name of the file that was written.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"ai_response_{timestamp}.txt"

    # Explicit UTF-8 so non-ASCII model output does not crash on
    # platforms whose default encoding is not UTF-8 (e.g. Windows).
    with open(filename, 'w', encoding='utf-8') as f:
        f.write("="*60 + "\n")
        f.write("AI ORCHESTRATOR - UNRESTRICTED RESPONSE\n")
        f.write("="*60 + "\n\n")
        f.write(f"PROMPT:\n{prompt}\n\n")
        f.write(f"SOURCE: {source.upper()}\n")
        f.write("="*60 + "\n")
        f.write("RESPONSE:\n")
        f.write("="*60 + "\n")
        f.write(response + "\n")
        f.write("="*60 + "\n")

    return filename


def main():
    """Entry point: pick a backend, run the query, save and summarize."""
    print("\n" + "="*60)
    print("đ¤ UNRESTRICTED AI ORCHESTRATOR")
    print("="*60)

    # Show which local models are available (first five only).
    local_models = check_local_models()
    print(f"\nđ Available local models: {len(local_models)}")
    for model in local_models[:5]:
        print(f"  ⢠{model}")

    # Prompt comes from argv, else interactively (with a demo default).
    if len(sys.argv) > 1:
        prompt = " ".join(sys.argv[1:])
    else:
        print("\nđŻ Enter your question (press Enter for test):")
        prompt = input("> ").strip()
        if not prompt:
            prompt = "Explain why open source AI models are important for freedom of information"

    print(f"\nđ Processing: {prompt[:100]}..."
          if len(prompt) > 100 else f"\nđ Processing: {prompt}")
    print("-" * 60)

    # Local first (preferred), Groq as cloud fallback.
    start_time = time.time()
    response = query_local(prompt)

    if response:
        source = "local"
        print("\nâ FROM LOCAL MODEL (100% UNRESTRICTED)")
        print("-" * 60)
        print(response)
        print("-" * 60)
    else:
        print("\nâ ď¸ Local model unavailable, using Groq...")
        response = query_groq(prompt)
        if response:
            source = "groq"
            print("\nâ ď¸ FROM GROQ (RESTRICTED - local unavailable)")
            print("-" * 60)
            print(response)
            print("-" * 60)
        else:
            print("\nâ No AI resources available!")
            return

    elapsed = time.time() - start_time

    filename = save_response(prompt, response, source)

    print("\nđ SUMMARY:")
    print(f"  Source: {source.upper()}")
    print(f"  Time: {elapsed:.2f} seconds")
    # BUG FIX: the saved filename was computed but never printed —
    # the summary showed a placeholder instead of the actual path.
    print(f"  Saved to: {filename}")
    print("\nđĄ Tip: Use 'ollama serve' to ensure local models are always available")
    print("="*60)


if __name__ == "__main__":
    main()