scripts/gemini_assistant.py
#!/usr/bin/env python3
"""
Gemini Fallback Assistant - use when Claude credits are exhausted.

Usage:
    python3 scripts/gemini_assistant.py "your question here"
    python3 scripts/gemini_assistant.py --interactive
    python3 scripts/gemini_assistant.py --file path/to/file.py "explain this code"

Features:
- Loads Sovereign OS context (CLAUDE.md) automatically
- Supports file input for code review
- Interactive mode for multi-turn conversations
"""

import os
import sys
from pathlib import Path
from typing import Optional

# Check for API key
API_KEY = os.environ.get("GEMINI_API_KEY")
if not API_KEY:
    print("Error: GEMINI_API_KEY not set")
    print("Run: export GEMINI_API_KEY='your-key'")
    sys.exit(1)

try:
    import google.generativeai as genai
except ImportError:
    # Best-effort auto-install on first run, then retry the import
    print("Installing google-generativeai...")
    os.system(f"{sys.executable} -m pip install -q google-generativeai")
    import google.generativeai as genai

# Configure the client
genai.configure(api_key=API_KEY)

# Paths
SOVEREIGN_OS = Path(__file__).parent.parent
CLAUDE_MD = SOVEREIGN_OS / "CLAUDE.md"

# Model names to try, newest first; the set of available names has changed
# across API versions, so chat() and interactive_mode() fall back through
# this list instead of hard-coding one model.
MODEL_NAMES = ["gemini-2.0-flash", "gemini-1.5-pro", "gemini-pro", "models/gemini-pro"]
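# If none of these names work, the currently available models can be listed
# at runtime (a sketch; needs a valid key and network access):
#   for m in genai.list_models():
#       if "generateContent" in m.supported_generation_methods:
#           print(m.name)
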
def load_context() -> str:
    """Load Sovereign OS context for the model."""
    context = ""

    if CLAUDE_MD.exists():
        # Load just the key sections to save tokens
        content = CLAUDE_MD.read_text()

        sections = []
        sections.append("# Sovereign OS Context\n")
        sections.append("You are helping with the Sovereign OS project.\n\n")

        # Extract the axioms section; if the end marker is missing, fall back
        # to a fixed 2000-character slice as a rough cap
        if "## The Four Axioms" in content:
            start = content.find("## The Four Axioms")
            end = content.find("## Compressed Runtime Test")
            if end == -1:
                end = start + 2000
            sections.append(content[start:end])

        context = "\n".join(sections)

    return context


def read_file(filepath: str) -> str:
    """Read a file for code review, wrapped in a fenced code block."""
    path = Path(filepath)
    if not path.exists():
        return f"File not found: {filepath}"
    return f"```{path.suffix[1:] if path.suffix else 'text'}\n{path.read_text()}\n```"

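# For example, read_file("scripts/gemini_assistant.py") returns the file text
# wrapped in a fence tagged "py" (the suffix without its dot); missing files
# produce a "File not found" message rather than an exception.
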
def chat(prompt: str, file_content: Optional[str] = None) -> str:
    """Send a prompt to Gemini and return the response text."""
    # Build the full prompt: project context, optional file, then the request
    full_prompt = load_context()

    if file_content:
        full_prompt += f"\n\n## File Content:\n{file_content}\n\n"

    full_prompt += f"\n## User Request:\n{prompt}"

    # Try the model names in order. GenerativeModel() does not validate its
    # argument, so bad names only fail at generate_content(); fall back then.
    last_error = None
    for name in MODEL_NAMES:
        try:
            model = genai.GenerativeModel(name)
            return model.generate_content(full_prompt).text
        except Exception as e:
            last_error = e
    return f"Error: no working model found (last error: {last_error})"

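# Example one-shot use from Python (run from scripts/, with GEMINI_API_KEY
# exported; importing this module exits if the key is missing):
#   from gemini_assistant import chat
#   print(chat("Summarize the Four Axioms in one sentence each."))
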

def interactive_mode():
    """Interactive chat mode."""
    print("\n" + "=" * 60)
    print("GEMINI ASSISTANT - Sovereign OS Fallback")
    print("=" * 60)
    print("Type 'quit' to exit, 'file:path' to load a file")
    print("=" * 60 + "\n")

    # Find a working model. GenerativeModel() does not validate its argument,
    # so send the opening message to confirm the model actually responds.
    context = load_context()
    opening = (f"Context for our conversation:\n{context}" if context
               else "You are helping with the Sovereign OS project.")
    chat_session = None
    for name in MODEL_NAMES:
        try:
            session = genai.GenerativeModel(name).start_chat(history=[])
            session.send_message(opening)
            chat_session = session
            break
        except Exception:
            continue
    if chat_session is None:
        print("Error: Could not find a working model")
        return

    while True:
        try:
            user_input = input("\nYou: ").strip()
        except (KeyboardInterrupt, EOFError):
            print("\nGoodbye!")
            break

        if not user_input:
            continue

        if user_input.lower() == 'quit':
            print("Goodbye!")
            break

        # 'file:path' loads a file, then asks for a question about it
        if user_input.startswith("file:"):
            filepath = user_input[5:].strip()
            content = read_file(filepath)
            print(f"\nLoaded {filepath}")
            user_input = input("Question about this file: ").strip()
            user_input = f"{content}\n\n{user_input}"

        try:
            response = chat_session.send_message(user_input)
            print(f"\nGemini: {response.text}")
        except Exception as e:
            print(f"\nError: {e}")


def main():
    args = sys.argv[1:]

    if not args:
        print(__doc__)
        print("\nQuick test:")
        response = chat("Say 'Gemini fallback is working!' in exactly those words.")
        print(f"Response: {response}")
        return

    if args[0] in ("--interactive", "-i"):
        interactive_mode()
        return

    file_content = None
    prompt_start = 0

    if args[0] in ("--file", "-f"):
        if len(args) < 3:
            print("Usage: --file <path> <prompt>")
            return
        file_content = read_file(args[1])
        prompt_start = 2

    prompt = " ".join(args[prompt_start:])
    response = chat(prompt, file_content)
    print(response)


if __name__ == "__main__":
    main()