# free_api_orchestrator.py
  1  #!/usr/bin/env python3
  2  """
  3  AI ORCHESTRATOR WITH FREE APIS
  4  Uses completely free LLM APIs where possible
  5  """
  6  
import asyncio
import json
import os
import subprocess
import sys
from datetime import datetime
from typing import Dict, List, Optional

import aiohttp
 14  
 15  class FreeAPIManager:
 16      """Manages free LLM APIs"""
 17      
 18      def __init__(self):
 19          # Check for free API keys
 20          self.apis = {
 21              "google": os.getenv("GOOGLE_API_KEY"),
 22              "huggingface": os.getenv("HUGGINGFACE_TOKEN"),
 23              "together": os.getenv("TOGETHER_API_KEY"),
 24              "groq": os.getenv("GROQ_API_KEY")
 25          }
 26          
 27          self.available_apis = {k: v for k, v in self.apis.items() if v}
 28          
 29          print("šŸ” Checking free APIs...")
 30          for api, key in self.available_apis.items():
 31              print(f"   āœ“ {api.upper()}: Available")
 32          
 33          if not self.available_apis:
 34              print("   āš ļø  No free API keys found")
 35              print("\n   Get free API keys from:")
 36              print("   1. Google AI Studio: https://makersuite.google.com/app/apikey")
 37              print("   2. Hugging Face: https://huggingface.co/settings/tokens")
 38              print("   3. Together AI: https://together.ai (free $25 credits)")
 39              print("   4. Groq: https://console.groq.com")
 40      
 41      async def query_google_gemini(self, prompt: str) -> str:
 42          """Query Google Gemini API (FREE: 60 RPM, 1M tokens/month)"""
 43          api_key = self.available_apis.get("google")
 44          if not api_key:
 45              return "[Google API key not set]"
 46          
 47          url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key={api_key}"
 48          
 49          payload = {
 50              "contents": [{
 51                  "parts": [{"text": prompt}]
 52              }],
 53              "generationConfig": {
 54                  "maxOutputTokens": 500,
 55                  "temperature": 0.7
 56              }
 57          }
 58          
 59          try:
 60              async with aiohttp.ClientSession() as session:
 61                  async with session.post(url, json=payload, timeout=30) as response:
 62                      if response.status == 200:
 63                          data = await response.json()
 64                          return data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "No response")
 65                      else:
 66                          error = await response.text()
 67                          return f"[Google Error {response.status}: {error[:100]}]"
 68          except Exception as e:
 69              return f"[Google Error: {str(e)}]"
 70      
 71      async def query_huggingface(self, prompt: str, model: str = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> str:
 72          """Query Hugging Face Inference API (FREE: 30K tokens/month)"""
 73          token = self.available_apis.get("huggingface")
 74          if not token:
 75              return "[Hugging Face token not set]"
 76          
 77          url = f"https://api-inference.huggingface.co/models/{model}"
 78          headers = {"Authorization": f"Bearer {token}"}
 79          payload = {
 80              "inputs": prompt,
 81              "parameters": {
 82                  "max_new_tokens": 300,
 83                  "temperature": 0.7,
 84                  "return_full_text": False
 85              }
 86          }
 87          
 88          try:
 89              async with aiohttp.ClientSession() as session:
 90                  async with session.post(url, headers=headers, json=payload, timeout=30) as response:
 91                      if response.status == 200:
 92                          data = await response.json()
 93                          if isinstance(data, list) and len(data) > 0:
 94                              return data[0].get("generated_text", "No response")
 95                          return str(data)
 96                      else:
 97                          error = await response.text()
 98                          return f"[HF Error {response.status}: {error[:100]}]"
 99          except Exception as e:
100              return f"[HF Error: {str(e)}]"
101      
102      async def query_together(self, prompt: str, model: str = "mistralai/Mixtral-8x7B-Instruct-v0.1") -> str:
103          """Query Together AI (FREE: $25 credits)"""
104          api_key = self.available_apis.get("together")
105          if not api_key:
106              return "[Together API key not set]"
107          
108          url = "https://api.together.xyz/v1/chat/completions"
109          headers = {"Authorization": f"Bearer {api_key}"}
110          payload = {
111              "model": model,
112              "messages": [{"role": "user", "content": prompt}],
113              "max_tokens": 500,
114              "temperature": 0.7
115          }
116          
117          try:
118              async with aiohttp.ClientSession() as session:
119                  async with session.post(url, headers=headers, json=payload, timeout=30) as response:
120                      if response.status == 200:
121                          data = await response.json()
122                          return data.get("choices", [{}])[0].get("message", {}).get("content", "No response")
123                      else:
124                          error = await response.text()
125                          return f"[Together Error {response.status}: {error[:100]}]"
126          except Exception as e:
127              return f"[Together Error: {str(e)}]"
128      
129      async def query_groq(self, prompt: str, model: str = "mixtral-8x7b-32768") -> str:
130          """Query Groq API (FREE: Limited but generous)"""
131          api_key = self.available_apis.get("groq")
132          if not api_key:
133              return "[Groq API key not set]"
134          
135          url = "https://api.groq.com/openai/v1/chat/completions"
136          headers = {"Authorization": f"Bearer {api_key}"}
137          payload = {
138              "model": model,
139              "messages": [{"role": "user", "content": prompt}],
140              "max_tokens": 500,
141              "temperature": 0.7
142          }
143          
144          try:
145              async with aiohttp.ClientSession() as session:
146                  async with session.post(url, headers=headers, json=payload, timeout=30) as response:
147                      if response.status == 200:
148                          data = await response.json()
149                          return data.get("choices", [{}])[0].get("message", {}).get("content", "No response")
150                      else:
151                          error = await response.text()
152                          return f"[Groq Error {response.status}: {error[:100]}]"
153          except Exception as e:
154              return f"[Groq Error: {str(e)}]"
155      
156      async def query_all_free_apis(self, prompt: str) -> List[Dict]:
157          """Query all available free APIs in parallel"""
158          tasks = []
159          
160          if "google" in self.available_apis:
161              tasks.append(self.query_google_gemini(prompt))
162          if "huggingface" in self.available_apis:
163              tasks.append(self.query_huggingface(prompt))
164          if "together" in self.available_apis:
165              tasks.append(self.query_together(prompt))
166          if "groq" in self.available_apis:
167              tasks.append(self.query_groq(prompt))
168          
169          if not tasks:
170              return []
171          
172          print(f"  Querying {len(tasks)} free API(s) in parallel...")
173          results = await asyncio.gather(*tasks, return_exceptions=True)
174          
175          # Process results
176          processed = []
177          api_names = list(self.available_apis.keys())[:len(tasks)]
178          
179          for i, (api_name, result) in enumerate(zip(api_names, results)):
180              if isinstance(result, Exception):
181                  processed.append({
182                      "api": api_name,
183                      "response": f"[{api_name} Error: {str(result)}]",
184                      "success": False
185                  })
186              else:
187                  processed.append({
188                      "api": api_name,
189                      "response": result,
190                      "success": True
191                  })
192          
193          return processed
194  
class LocalModelManager:
    """Fallback to local models via the llama.cpp CLI binary."""

    def __init__(self, model_path: Optional[str] = None):
        """Locate the llama.cpp binary and a usable GGUF model.

        Args:
            model_path: Explicit path to a GGUF model file. If omitted or
                nonexistent, well-known filenames under ~/models are probed.
        """
        self.llama_path = os.path.expanduser("~/llama.cpp/build/bin/llama-cli")

        # Candidate model filenames to probe, in order of preference.
        models_dir = os.path.expanduser("~/models")
        possible_models = [
            "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
            "llama-2-7b-chat.Q4_K_M.gguf",
            "tinyllama.gguf"
        ]

        if model_path and os.path.exists(model_path):
            self.model_path = model_path
        else:
            # for/else: the else branch runs only when no candidate exists.
            for model in possible_models:
                path = os.path.join(models_dir, model)
                if os.path.exists(path):
                    self.model_path = path
                    break
            else:
                self.model_path = None

        if self.model_path:
            print(f"šŸ¤– Local model: {os.path.basename(self.model_path)}")
        else:
            print("āš ļø  No local model found")

    def query(self, prompt: str, max_tokens: int = 200) -> str:
        """Run *prompt* through the local model as a fallback.

        Returns the model's completion text, or a bracketed status string
        when the model/binary is missing or the subprocess fails
        (never raises).
        """
        if not self.model_path or not os.path.exists(self.llama_path):
            return "[Local model not available]"

        cmd = [
            self.llama_path,
            "-m", self.model_path,
            "-p", prompt,
            "-n", str(max_tokens),
            "--temp", "0.7",
            "-no-cnv",           # plain completion mode, no chat wrapper
            "--threads", "4"
        ]

        try:
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=30)

            # llama-cli echoes the prompt; return the text that follows it
            # on the same stdout line, stripping a leading colon if present.
            for line in result.stdout.split('\n'):
                if prompt in line:
                    response = line[line.find(prompt) + len(prompt):].strip()
                    response = response.lstrip(':').strip()
                    if response:
                        return response

            return "[Local: No response]"

        except Exception as e:
            return f"[Local Error: {str(e)}]"
256  
class FreeAPIOrchestrator:
    """Orchestrator using free APIs with local fallback.

    Splits a task into subtasks, answers each with the first responsive
    free API (falling back to the local model), then synthesizes a final
    answer locally when a local model is available.
    """

    def __init__(self):
        self.free_api = FreeAPIManager()
        self.local_model = LocalModelManager()

        self.total_apis = len(self.free_api.available_apis)
        self.has_local = self.local_model.model_path is not None

        print("\nšŸ“Š SYSTEM STATUS:")
        print(f"   Free APIs available: {self.total_apis}")
        print(f"   Local model available: {self.has_local}")
        print(f"   Total processing capacity: {self.total_apis + (1 if self.has_local else 0)}")

    async def orchestrate_complex_task(self, task: str) -> Dict:
        """Orchestrate a complex task using all available resources.

        Returns a dict with the task, timestamp, resource counts,
        subtasks, per-subtask results, and the final synthesized answer.
        """
        print(f"\n{'='*60}")
        print(f"šŸš€ Processing: {task}")
        print(f"{'='*60}")

        # Step 1: Break the task into fixed-template subtasks.
        print("\nšŸ” Step 1: Analyzing task...")
        subtasks = self._create_subtasks(task)
        print(f"   Created {len(subtasks)} subtasks")

        # Step 2: Answer each subtask, preferring free APIs over local.
        print(f"\n⚔ Step 2: Parallel processing ({self.total_apis} APIs + local)...")
        all_results = []

        for i, subtask in enumerate(subtasks):
            print(f"   Subtask {i+1}: {subtask[:50]}...")

            if self.total_apis > 0:
                api_results = await self.free_api.query_all_free_apis(subtask)
                # for/else: keep only the FIRST "successful" API response;
                # the else branch runs when no API succeeded at all.
                for api_result in api_results:
                    if api_result["success"]:
                        all_results.append(f"[{api_result['api'].upper()}]: {api_result['response'][:100]}...")
                        break
                else:
                    # All APIs failed, try local
                    if self.has_local:
                        local_result = self.local_model.query(subtask)
                        all_results.append(f"[LOCAL]: {local_result[:100]}...")
            else:
                # No APIs configured; the local model is the only option.
                if self.has_local:
                    local_result = self.local_model.query(subtask)
                    all_results.append(f"[LOCAL]: {local_result[:100]}...")

        # Step 3: Synthesize a final answer. With no local model, fall
        # back to a plain concatenation of the partial results.
        print(f"\n🧩 Step 3: Synthesizing {len(all_results)} results...")

        if self.has_local:
            synthesis_prompt = f"""Combine these analyses into a comprehensive answer:
            
            Task: {task}
            
            Analyses:
            {' | '.join(all_results)}
            
            Provide a well-structured, comprehensive answer:"""

            final_answer = self.local_model.query(synthesis_prompt, max_tokens=400)
        else:
            final_answer = " | ".join(all_results)

        # Compile the full run record for the caller / report writer.
        results = {
            "task": task,
            "timestamp": datetime.now().isoformat(),
            "free_apis_used": self.total_apis,
            "local_model_used": self.has_local,
            "subtasks": subtasks,
            "all_results": all_results,
            "final_answer": final_answer
        }

        return results

    def _create_subtasks(self, task: str) -> List[str]:
        """Create subtasks from the main task.

        Simple fixed-template decomposition — can be enhanced later.
        """
        return [
            f"Background and history of {task}",
            f"Core concepts and principles of {task}",
            f"Real-world applications of {task}",
            f"Future developments in {task}"
        ]

    def save_results(self, results: Dict) -> str:
        """Write a plain-text report of *results*; return the filename."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"free_api_orchestrator_{timestamp}.txt"

        with open(filename, 'w') as f:
            f.write(f"{'='*60}\n")
            f.write(f"FREE API ORCHESTRATOR RESULTS\n")
            f.write(f"{'='*60}\n\n")

            f.write(f"Task: {results['task']}\n")
            f.write(f"Date: {results['timestamp']}\n")
            f.write(f"Free APIs used: {results['free_apis_used']}\n")
            f.write(f"Local model used: {results['local_model_used']}\n\n")

            f.write(f"SUBTASKS:\n")
            for i, subtask in enumerate(results['subtasks'], 1):
                f.write(f"{i}. {subtask}\n")
            f.write("\n")

            f.write(f"RESULTS FROM APIS:\n")
            for i, result in enumerate(results['all_results'], 1):
                f.write(f"{i}. {result}\n")
            f.write("\n")

            f.write(f"FINAL ANSWER:\n")
            f.write(f"{results['final_answer']}\n\n")

            f.write(f"ARCHITECTURE:\n")
            f.write("-" * 40 + "\n")
            f.write("1. Free Cloud APIs (Google, Hugging Face, Together, Groq)\n")
            f.write("2. Local model fallback (TinyLlama/Llama-2/Mistral)\n")
            f.write("3. Parallel processing of subtasks\n")
            f.write("4. Local synthesis of final answer\n")

        # BUG FIX: the original printed a literal placeholder instead of
        # interpolating the actual report filename.
        print(f"\nšŸ“ Results saved to: {filename}")
        return filename
385  
async def main():
    """Entry point: gather a task, verify resources, run the orchestration."""
    banner = "=" * 60
    print("šŸš€ FREE API AI ORCHESTRATOR")
    print(banner)
    print("This system uses COMPLETELY FREE LLM APIs")
    print("No credit card required for basic usage")
    print(banner)

    # Take the task from the command line when given; otherwise prompt,
    # with a default topic for an empty answer.
    cli_args = sys.argv[1:]
    if cli_args:
        task = " ".join(cli_args)
    else:
        print("\nEnter a complex topic to process:")
        print("(e.g., 'Explain quantum computing applications')")
        task = input("> ").strip() or "artificial intelligence and machine learning"

    # Initialize orchestrator and bail out early with setup guidance when
    # neither free APIs nor a local model is usable.
    orchestrator = FreeAPIOrchestrator()

    if orchestrator.total_apis == 0 and not orchestrator.has_local:
        print("\nāŒ No resources available!")
        print("\nSetup free APIs:")
        print("1. Google AI Studio: https://makersuite.google.com/app/apikey")
        print("2. Hugging Face: https://huggingface.co/settings/tokens")
        print("3. Together AI: https://together.ai")
        print("4. Groq: https://console.groq.com")
        print("\nOr ensure local models are in ~/models/")
        return

    # Run the orchestration and report/save the outcome.
    try:
        outcome = await orchestrator.orchestrate_complex_task(task)

        print("\n" + banner)
        print("āœ… PROCESSING COMPLETE")
        print(banner)

        print("\nšŸŽÆ FINAL ANSWER:")
        print(banner)
        print(outcome['final_answer'])
        print(banner)

        orchestrator.save_results(outcome)

    except Exception as e:
        print(f"\nāŒ Error: {e}")
434  
if __name__ == "__main__":
    # Install required package if not available.
    # NOTE(review): this bootstrap pip-installs aiohttp at runtime and does
    # not check the installer's exit status; if the install fails, the
    # re-import below raises ImportError — presumably intentional fail-fast.
    try:
        import aiohttp
    except ImportError:
        print("Installing aiohttp...")
        import subprocess
        subprocess.run([sys.executable, "-m", "pip", "install", "aiohttp"])
        import aiohttp
    
    # Run the orchestrator
    asyncio.run(main())