# ultimate_orchestrator.py
  1  #!/usr/bin/env python3
  2  """
  3  ULTIMATE SWARM ORCHESTRATOR - DEC 2025
  4  Uses all your working APIs with correct models
  5  """
  6  
  7  import os
  8  import sys
  9  import json
 10  import re
 11  import asyncio
 12  import urllib.request
 13  import urllib.error
 14  from datetime import datetime
 15  from typing import List, Dict, Any
 16  
 17  # ========== API CONFIGURATION ==========
 18  class APIConfig:
 19      """API configurations with working models"""
 20      
 21      GOOGLE_MODELS = [
 22          "gemini-2.0-flash-001",      # Stable, fast, free
 23          "gemini-2.0-flash-lite-001", # Even faster, free
 24          "gemini-2.5-flash",         # Latest, 1M context
 25          "gemini-flash-latest",      # Always latest
 26      ]
 27      
 28      GROQ_MODELS = [
 29          "llama3-8b-8192",           # Free tier
 30          "mixtral-8x7b-32768",       # Fast, accurate
 31          "gemma-7b-it",              # Google's model
 32      ]
 33      
 34      HF_MODELS = [
 35          "microsoft/DialoGPT-small", # Fast, always available
 36          "gpt2",                     # Basic but reliable
 37          "google/flan-t5-base",      # Good for instructions
 38      ]
 39  
 40  class PrivacyEngine:
 41      """Advanced privacy protection"""
 42      
 43      def __init__(self):
 44          self.masking_rules = [
 45              (r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b', '[EMAIL]'),
 46              (r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', '[IP]'),
 47              (r'\b\d{3}[-.]?\d{3}[-.]?\d{4}\b', '[PHONE]'),
 48              (r'\b[A-Z]{2,10}-\d{3,5}\b', '[ID]'),
 49          ]
 50      
 51      def mask(self, text: str) -> str:
 52          """Mask sensitive information"""
 53          masked = text
 54          for pattern, replacement in self.masking_rules:
 55              masked = re.sub(pattern, replacement, masked, flags=re.IGNORECASE)
 56          return masked
 57  
 58  class AIProvider:
 59      """Base class for AI providers"""
 60      
 61      def __init__(self, name: str, api_key: str):
 62          self.name = name
 63          self.api_key = api_key
 64      
 65      async def query(self, prompt: str) -> str:
 66          """Query the AI provider"""
 67          raise NotImplementedError
 68  
 69  class GoogleProvider(AIProvider):
 70      """Google Gemini API"""
 71      
 72      def __init__(self, api_key: str):
 73          super().__init__("Google Gemini", api_key)
 74          self.models = APIConfig.GOOGLE_MODELS
 75          self.current_model_index = 0
 76      
 77      def get_next_model(self) -> str:
 78          """Rotate through available models"""
 79          model = self.models[self.current_model_index]
 80          self.current_model_index = (self.current_model_index + 1) % len(self.models)
 81          return model
 82      
 83      async def query(self, prompt: str) -> str:
 84          """Query Google Gemini"""
 85          model = self.get_next_model()
 86          url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={self.api_key}"
 87          
 88          payload = {
 89              "contents": [{"parts": [{"text": prompt}]}],
 90              "generationConfig": {
 91                  "maxOutputTokens": 500,
 92                  "temperature": 0.7,
 93                  "topP": 0.8,
 94                  "topK": 40
 95              },
 96              "safetySettings": [
 97                  {
 98                      "category": "HARM_CATEGORY_HARASSMENT",
 99                      "threshold": "BLOCK_MEDIUM_AND_ABOVE"
100                  }
101              ]
102          }
103          
104          try:
105              data = json.dumps(payload).encode('utf-8')
106              req = urllib.request.Request(
107                  url,
108                  data=data,
109                  headers={'Content-Type': 'application/json'}
110              )
111              
112              with urllib.request.urlopen(req, timeout=20) as response:
113                  result = json.loads(response.read().decode('utf-8'))
114                  
115                  # Extract response
116                  if 'candidates' in result and result['candidates']:
117                      text = result['candidates'][0]['content']['parts'][0]['text']
118                      return f"šŸ¤– {self.name} ({model}):\n{text}"
119                  else:
120                      return f"āŒ {self.name}: No response from model {model}"
121                      
122          except urllib.error.HTTPError as e:
123              return f"āŒ {self.name} HTTP {e.code}: {e.reason}"
124          except Exception as e:
125              return f"āŒ {self.name}: {str(e)[:100]}"
126  
class GroqProvider(AIProvider):
    """Client for Groq's OpenAI-compatible chat-completions endpoint.

    Rotates through ``APIConfig.GROQ_MODELS`` call-by-call.
    """

    def __init__(self, api_key: str):
        super().__init__("Groq", api_key)
        self.models = APIConfig.GROQ_MODELS
        self.current_model_index = 0

    def get_next_model(self) -> str:
        """Return the current model and advance the round-robin index."""
        model = self.models[self.current_model_index]
        self.current_model_index = (self.current_model_index + 1) % len(self.models)
        return model

    def _post_json(self, url: str, payload: dict) -> dict:
        """Blocking JSON POST helper; intended to run in a worker thread."""
        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(
            url,
            data=data,
            headers={
                'Authorization': f'Bearer {self.api_key}',
                'Content-Type': 'application/json'
            }
        )
        with urllib.request.urlopen(req, timeout=20) as response:
            return json.loads(response.read().decode('utf-8'))

    async def query(self, prompt: str) -> str:
        """Query Groq; never raises — failures come back as āŒ strings."""
        model = self.get_next_model()
        url = "https://api.groq.com/openai/v1/chat/completions"

        payload = {
            "model": model,
            "messages": [{"role": "user", "content": prompt}],
            "max_tokens": 500,
            "temperature": 0.7,
            "top_p": 0.8
        }

        try:
            # BUGFIX: urllib is blocking; run it in a thread so that
            # asyncio.gather() genuinely overlaps provider calls.
            result = await asyncio.to_thread(self._post_json, url, payload)

            # Guard the OpenAI-style response shape instead of risking a
            # raw KeyError on malformed/partial responses.
            choices = result.get('choices') or []
            if choices:
                message = choices[0].get('message', {})
                if 'content' in message:
                    return f"⚔ {self.name} ({model}):\n{message['content']}"
            return f"āŒ {self.name}: No response from model {model}"

        except urllib.error.HTTPError as e:
            return f"āŒ {self.name} HTTP {e.code}: {e.reason}"
        except Exception as e:
            return f"āŒ {self.name}: {str(e)[:100]}"
178  
class HuggingFaceProvider(AIProvider):
    """Client for the Hugging Face Inference API.

    Rotates through ``APIConfig.HF_MODELS`` call-by-call. Responses are
    truncated to 500 characters for readability.
    """

    def __init__(self, api_key: str):
        super().__init__("Hugging Face", api_key)
        self.models = APIConfig.HF_MODELS
        self.current_model_index = 0

    def get_next_model(self) -> str:
        """Return the current model and advance the round-robin index."""
        model = self.models[self.current_model_index]
        self.current_model_index = (self.current_model_index + 1) % len(self.models)
        return model

    def _post_json(self, url: str, payload: dict):
        """Blocking JSON POST helper; intended to run in a worker thread.

        The HF API may return either a list of generations or an error
        object, so the decoded JSON is returned as-is.
        """
        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(
            url,
            data=data,
            headers={
                'Authorization': f'Bearer {self.api_key}',
                'Content-Type': 'application/json'
            }
        )
        # HF cold-starts models on demand, hence the longer timeout.
        with urllib.request.urlopen(req, timeout=30) as response:
            return json.loads(response.read().decode('utf-8'))

    async def query(self, prompt: str) -> str:
        """Query Hugging Face; never raises — failures come back as āŒ strings."""
        model = self.get_next_model()
        url = f"https://api-inference.huggingface.co/models/{model}"

        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": 0.7,
                "return_full_text": False
            }
        }

        try:
            # BUGFIX: urllib is blocking; run it in a thread so that
            # asyncio.gather() genuinely overlaps provider calls.
            result = await asyncio.to_thread(self._post_json, url, payload)

            if isinstance(result, list) and result:
                entry = result[0]
                # Generations are dicts with 'generated_text'; fall back
                # to the raw repr for any other element shape.
                text = entry.get('generated_text') if isinstance(entry, dict) else None
                if text is None:
                    text = str(entry)
                return f"šŸ¤— {self.name} ({model}):\n{text[:500]}..."
            return f"āŒ {self.name}: Model {model} is loading or unavailable"

        except urllib.error.HTTPError as e:
            return f"āŒ {self.name} HTTP {e.code}: {e.reason}"
        except Exception as e:
            return f"āŒ {self.name}: {str(e)[:100]}"
234  
class TaskOrchestrator:
    """End-to-end pipeline: privacy masking -> task decomposition ->
    concurrent provider fan-out -> compiled results and report.
    """

    def __init__(self):
        self.privacy = PrivacyEngine()
        self.providers = []

        # SECURITY FIX: credentials are read from the environment instead
        # of being hard-coded in source (the original embedded live API
        # keys). Export GOOGLE_API_KEY, GROQ_API_KEY and/or HF_API_KEY;
        # providers without a key are simply skipped.
        google_key = os.environ.get("GOOGLE_API_KEY", "")
        if google_key:
            self.providers.append(GoogleProvider(google_key))

        groq_key = os.environ.get("GROQ_API_KEY", "")
        if groq_key:
            self.providers.append(GroqProvider(groq_key))

        hf_key = os.environ.get("HF_API_KEY", "")
        if hf_key:
            self.providers.append(HuggingFaceProvider(hf_key))

        print("=" * 70)
        print("šŸš€ ULTIMATE AI ORCHESTRATOR - DEC 2025")
        print("=" * 70)
        print(f"āœ… Loaded {len(self.providers)} AI Providers:")
        for provider in self.providers:
            print(f"   • {provider.name}")
        print("=" * 70)

    def decompose_task(self, task: str) -> List[str]:
        """Break *task* into four complementary subtask prompts.

        Short tasks (under 100 chars) get perspective-style prompts;
        longer tasks are truncated to 100 chars and probed from four
        different angles.
        """
        if len(task) < 100:
            return [
                f"Explain: {task}",
                f"Provide examples of: {task}",
                f"Discuss applications of: {task}",
                f"Give key points about: {task}"
            ]
        return [
            f"Summarize the main idea: {task[:100]}...",
            f"Explain technical aspects: {task[:100]}...",
            f"Discuss practical implications: {task[:100]}...",
            f"Provide analysis: {task[:100]}..."
        ]

    async def orchestrate(self, user_task: str) -> Dict[str, Any]:
        """Run the full workflow and return a structured result dict.

        The returned dict contains the original and masked task, the
        subtasks, provider names, raw responses, and a timestamp.
        """
        print(f"\nšŸŽÆ USER TASK: {user_task}")

        # Step 1: mask PII before anything leaves the machine.
        print("\nšŸ”’ Step 1: Privacy Protection...")
        masked_task = self.privacy.mask(user_task)
        if masked_task != user_task:
            print(f"   Masked sensitive data: {masked_task}")

        # Step 2: split the masked task into subtasks.
        print("\nšŸ” Step 2: Task Analysis & Decomposition...")
        subtasks = self.decompose_task(masked_task)
        print(f"   Created {len(subtasks)} intelligent subtasks")

        # Step 3: fan out concurrently. FIX: subtasks are dealt round-robin
        # (modulo) so extra providers cycle through the whole list instead
        # of all repeating the first subtask.
        print(f"\n⚔ Step 3: Parallel AI Processing ({len(self.providers)} providers)...")
        provider_tasks = []
        for i, provider in enumerate(self.providers):
            subtask = subtasks[i % len(subtasks)]
            print(f"   {provider.name}: {subtask[:60]}...")
            provider_tasks.append(provider.query(subtask))

        results = await asyncio.gather(*provider_tasks)

        # Step 4: bundle everything for reporting and JSON serialization.
        print("\nšŸ“Š Step 4: Results Compilation...")
        return {
            "original_task": user_task,
            "masked_task": masked_task,
            "subtasks": subtasks,
            "providers": [p.name for p in self.providers],
            "responses": results,
            "timestamp": datetime.now().isoformat()
        }

    def generate_report(self, results: Dict[str, Any]) -> str:
        """Render a result dict from :meth:`orchestrate` as readable text."""
        report = f"""
{'='*70}
šŸ¤– AI ORCHESTRATION REPORT
{'='*70}

šŸ“… Timestamp: {results['timestamp']}
šŸŽÆ Original Task: {results['original_task']}

šŸ“Š EXECUTED SUBTASKS:
"""
        for i, subtask in enumerate(results['subtasks'], 1):
            report += f"\n{i}. {subtask[:80]}..."

        report += f"\n\n{'='*70}\nšŸ“ˆ AI RESPONSES:\n{'='*70}\n"

        for i, (provider, response) in enumerate(zip(results['providers'], results['responses']), 1):
            report += f"\n{i}. {provider}:\n"
            report += "-" * 40 + "\n"
            report += response + "\n"

        # Summary: a response counts as successful when it carries no āŒ
        # failure marker (the providers' error convention).
        report += f"\n{'='*70}\n✨ EXECUTIVE SUMMARY\n{'='*70}\n"
        report += f"""
• Processed using {len(results['providers'])} AI providers in parallel
• Privacy: All sensitive data was masked before processing
• Models used: Google Gemini 2.0/2.5, Groq (Llama/Mixtral), Hugging Face
• Total queries: {len(results['responses'])}
• Successful responses: {sum(1 for r in results['responses'] if 'āŒ' not in r)}

šŸ’” RECOMMENDATIONS:
1. Compare insights from different AI models above
2. Look for consensus among providers
3. Implement actionable advice
4. Test solutions in safe environment
"""
        return report
375  
async def main():
    """CLI entry point: gather a task, run the orchestrator, save results.

    Task source priority: command-line args, then one line from stdin,
    then a built-in default. Results are written to timestamped .json
    and .txt files in the working directory.
    """
    print("\n" + "="*70)
    print("šŸš€ ULTIMATE AI ORCHESTRATOR")
    print("="*70)

    if len(sys.argv) > 1:
        user_task = " ".join(sys.argv[1:])
    else:
        print("\nšŸ“ Enter your task or question:")
        print("Examples:")
        print("  • 'Explain quantum computing for beginners'")
        print("  • 'Create a Python script for file encryption'")
        print("  • 'How to secure a Linux web server'")
        print("  • 'Analyze the impact of AI on cybersecurity'")
        print("\nYour task (can include emails/IPs - they'll be masked):")

        user_task = sys.stdin.readline().strip()
        if not user_task:
            user_task = "Explain artificial intelligence and its applications in modern technology"

    orchestrator = TaskOrchestrator()

    if len(orchestrator.providers) == 0:
        # SECURITY FIX: the original printed real API keys here. Point
        # the user at the environment variables instead.
        print("\nāŒ No API keys configured!")
        print("\nSet one or more of these environment variables and retry:")
        print("  • GOOGLE_API_KEY (Google Gemini)")
        print("  • GROQ_API_KEY (Groq)")
        print("  • HF_API_KEY (Hugging Face)")
        return

    try:
        print("\n" + "="*70)
        print("šŸ”„ PROCESSING WITH INTELLIGENT ORCHESTRATION...")
        print("="*70)

        # Run the full orchestration workflow.
        results = await orchestrator.orchestrate(user_task)

        # Display the human-readable report.
        report = orchestrator.generate_report(results)
        print(report)

        # Persist both machine-readable and human-readable outputs.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        json_file = f"orchestration_{timestamp}.json"
        with open(json_file, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)

        txt_file = f"orchestration_{timestamp}.txt"
        with open(txt_file, 'w', encoding='utf-8') as f:
            f.write(report)

        print(f"\nšŸ’¾ Results saved to:")
        print(f"   • {json_file} (structured JSON)")
        print(f"   • {txt_file} (readable report)")

        print("\n" + "="*70)
        print("āœ… ORCHESTRATION COMPLETE!")
        print("="*70)

    except KeyboardInterrupt:
        print("\n\nā¹ļø  Orchestration cancelled by user")
    except Exception as e:
        # Top-level boundary: report and show the traceback rather than
        # letting the process die with an unhandled exception.
        print(f"\nāŒ Error during orchestration: {e}")
        import traceback
        traceback.print_exc()
449  
if __name__ == "__main__":
    # Launch the orchestrator's async entry point when run as a script.
    asyncio.run(main())