# tests/hermes_cli/test_codex_models.py
import json
import os
import sys
from unittest.mock import patch

# Make the sibling package importable when this file is run directly
# (outside pytest's rootdir-based sys.path handling).
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from hermes_cli.codex_models import DEFAULT_CODEX_MODELS, get_codex_model_ids
  9  
 10  
 11  def test_get_codex_model_ids_prioritizes_default_and_cache(tmp_path, monkeypatch):
 12      codex_home = tmp_path / "codex-home"
 13      codex_home.mkdir(parents=True, exist_ok=True)
 14      (codex_home / "config.toml").write_text('model = "gpt-5.2-codex"\n')
 15      (codex_home / "models_cache.json").write_text(
 16          json.dumps(
 17              {
 18                  "models": [
 19                      {"slug": "gpt-5.3-codex", "priority": 20, "supported_in_api": True},
 20                      {"slug": "gpt-5.1-codex", "priority": 5, "supported_in_api": True},
 21                      {"slug": "gpt-5.4", "priority": 1, "supported_in_api": True},
 22                      {"slug": "gpt-5-hidden-codex", "priority": 2, "visibility": "hidden"},
 23                  ]
 24              }
 25          )
 26      )
 27      monkeypatch.setenv("CODEX_HOME", str(codex_home))
 28  
 29      models = get_codex_model_ids()
 30  
 31      assert models[0] == "gpt-5.2-codex"
 32      assert "gpt-5.1-codex" in models
 33      assert "gpt-5.3-codex" in models
 34      # Non-codex-suffixed models are included when the cache says they're available
 35      assert "gpt-5.4" in models
 36      assert "gpt-5.4-mini" in models
 37      assert "gpt-5-hidden-codex" not in models
 38  
 39  
 40  def test_setup_wizard_codex_import_resolves():
 41      """Regression test for #712: setup.py must import the correct function name."""
 42      # This mirrors the exact import used in hermes_cli/setup.py line 873.
 43      # A prior bug had 'get_codex_models' (wrong) instead of 'get_codex_model_ids'.
 44      from hermes_cli.codex_models import get_codex_model_ids as setup_import
 45      assert callable(setup_import)
 46  
 47  
 48  def test_get_codex_model_ids_falls_back_to_curated_defaults(tmp_path, monkeypatch):
 49      codex_home = tmp_path / "codex-home"
 50      codex_home.mkdir(parents=True, exist_ok=True)
 51      monkeypatch.setenv("CODEX_HOME", str(codex_home))
 52  
 53      models = get_codex_model_ids()
 54  
 55      assert models[: len(DEFAULT_CODEX_MODELS)] == DEFAULT_CODEX_MODELS
 56      assert "gpt-5.4" in models
 57      assert "gpt-5.3-codex-spark" not in models
 58  
 59  
 60  def test_get_codex_model_ids_adds_forward_compat_models_from_templates(monkeypatch):
 61      monkeypatch.setattr(
 62          "hermes_cli.codex_models._fetch_models_from_api",
 63          lambda access_token: ["gpt-5.2-codex"],
 64      )
 65  
 66      models = get_codex_model_ids(access_token="codex-access-token")
 67  
 68      assert models == ["gpt-5.2-codex", "gpt-5.4-mini", "gpt-5.4", "gpt-5.3-codex"]
 69  
 70  
 71  def test_model_command_uses_runtime_access_token_for_codex_list(monkeypatch):
 72      from hermes_cli.main import _model_flow_openai_codex
 73  
 74      captured = {}
 75      choices = iter(["1"])
 76  
 77      monkeypatch.setattr("builtins.input", lambda prompt="": next(choices))
 78      monkeypatch.setattr(
 79          "hermes_cli.auth.get_codex_auth_status",
 80          lambda: {"logged_in": True},
 81      )
 82      monkeypatch.setattr(
 83          "hermes_cli.auth.resolve_codex_runtime_credentials",
 84          lambda *args, **kwargs: {"api_key": "codex-access-token"},
 85      )
 86  
 87      def _fake_get_codex_model_ids(access_token=None):
 88          captured["access_token"] = access_token
 89          return ["gpt-5.2-codex", "gpt-5.2"]
 90  
 91      def _fake_prompt_model_selection(model_ids, current_model=""):
 92          captured["model_ids"] = list(model_ids)
 93          captured["current_model"] = current_model
 94          return None
 95  
 96      monkeypatch.setattr(
 97          "hermes_cli.codex_models.get_codex_model_ids",
 98          _fake_get_codex_model_ids,
 99      )
100      monkeypatch.setattr(
101          "hermes_cli.auth._prompt_model_selection",
102          _fake_prompt_model_selection,
103      )
104  
105      _model_flow_openai_codex({}, current_model="openai/gpt-5.4")
106  
107      assert captured["access_token"] == "codex-access-token"
108      assert captured["model_ids"] == ["gpt-5.2-codex", "gpt-5.2"]
109      assert captured["current_model"] == "openai/gpt-5.4"
110  
111  
def test_model_command_prompts_to_reuse_or_reauthenticate_codex_session(monkeypatch, capsys):
    """Choosing option 2 triggers exactly one forced (new OAuth) login."""
    from hermes_cli.main import _model_flow_openai_codex

    captured = {"login_calls": 0}
    answers = iter(["2"])
    monkeypatch.setattr("builtins.input", lambda prompt="": next(answers))

    monkeypatch.setattr(
        "hermes_cli.auth.get_codex_auth_status",
        lambda: {"logged_in": True, "source": "hermes-auth-store"},
    )
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_codex_runtime_credentials",
        lambda *args, **kwargs: {"api_key": "fresh-codex-token"},
    )

    def fake_login(*args, force_new_login=False, **kwargs):
        captured["login_calls"] += 1
        captured["force_new_login"] = force_new_login

    monkeypatch.setattr("hermes_cli.auth._login_openai_codex", fake_login)
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.4", "gpt-5.3-codex"],
    )
    monkeypatch.setattr(
        "hermes_cli.auth._prompt_model_selection",
        lambda model_ids, current_model="": None,
    )

    _model_flow_openai_codex({}, current_model="gpt-5.4")

    output = capsys.readouterr().out
    assert "Use existing credentials" in output
    assert "Reauthenticate (new OAuth login)" in output
    assert captured["login_calls"] == 1
    assert captured["force_new_login"] is True
149  
150  
def test_model_command_uses_existing_codex_session_without_relogin(monkeypatch):
    """Choosing option 1 reuses stored credentials and never re-authenticates."""
    from hermes_cli.main import _model_flow_openai_codex

    captured = {}
    answers = iter(["1"])
    monkeypatch.setattr("builtins.input", lambda prompt="": next(answers))

    monkeypatch.setattr(
        "hermes_cli.auth.get_codex_auth_status",
        lambda: {"logged_in": True, "source": "hermes-auth-store"},
    )
    monkeypatch.setattr(
        "hermes_cli.auth.resolve_codex_runtime_credentials",
        lambda *args, **kwargs: {"api_key": "existing-codex-token"},
    )

    def fake_model_ids(access_token=None):
        captured["access_token"] = access_token
        return ["gpt-5.4"]

    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids", fake_model_ids
    )
    monkeypatch.setattr(
        "hermes_cli.auth._prompt_model_selection",
        lambda model_ids, current_model="": None,
    )

    # Any login attempt is a test failure.
    def fail_login(*args, **kwargs):
        raise AssertionError("should not reauthenticate")

    monkeypatch.setattr("hermes_cli.auth._login_openai_codex", fail_login)

    _model_flow_openai_codex({}, current_model="gpt-5.4")

    assert captured["access_token"] == "existing-codex-token"
187  
188  
189  # ── Tests for _normalize_model_for_provider ──────────────────────────
190  
191  
def _make_cli(model="anthropic/claude-opus-4.6", **kwargs):
    """Create a HermesCLI with minimal mocking."""
    import cli as _cli_mod
    from cli import HermesCLI

    base_config = {
        "model": {
            "default": "anthropic/claude-opus-4.6",
            "base_url": "https://openrouter.ai/api/v1",
            "provider": "auto",
        },
        "display": {"compact": False, "tool_progress": "all", "resume_display": "full"},
        "agent": {},
        "terminal": {"env_type": "local"},
    }
    env_overrides = {"LLM_MODEL": "", "HERMES_MAX_ITERATIONS": ""}
    # Neutralize tool discovery, env leakage, and the module-level config
    # while the CLI is constructed; patches unwind before the object is used.
    with (
        patch("cli.get_tool_definitions", return_value=[]),
        patch.dict("os.environ", env_overrides, clear=False),
        patch.dict(_cli_mod.__dict__, {"CLI_CONFIG": base_config}),
    ):
        return HermesCLI(model=model, **kwargs)
215  
216  
class TestNormalizeModelForProvider:
    """_normalize_model_for_provider() trusts user-selected models.

    Only two things happen:
    1. Provider prefixes are stripped (API needs bare slugs)
    2. The *untouched default* model is swapped for a Codex model
    Everything else passes through — the API is the judge.
    """

    def test_non_codex_provider_is_noop(self):
        # Bare model + non-codex provider: nothing to strip, nothing to swap.
        cli = _make_cli(model="gpt-5.4")
        changed = cli._normalize_model_for_provider("openrouter")
        assert changed is False
        assert cli.model == "gpt-5.4"

    def test_native_provider_prefix_is_stripped_before_agent_startup(self):
        # "zai/" prefix matches the active provider, so it is removed.
        cli = _make_cli(model="zai/glm-5.1")
        changed = cli._normalize_model_for_provider("zai")
        assert changed is True
        assert cli.model == "glm-5.1"

    def test_bare_codex_model_passes_through(self):
        cli = _make_cli(model="gpt-5.3-codex")
        changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is False
        assert cli.model == "gpt-5.3-codex"

    def test_bare_non_codex_model_passes_through(self):
        """gpt-5.4 (no 'codex' suffix) passes through — user chose it."""
        cli = _make_cli(model="gpt-5.4")
        changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is False
        assert cli.model == "gpt-5.4"

    def test_any_bare_model_trusted(self):
        """Even a non-OpenAI bare model passes through — user explicitly set it."""
        cli = _make_cli(model="claude-opus-4-6")
        changed = cli._normalize_model_for_provider("openai-codex")
        # User explicitly chose this model — we trust them, API will error if wrong
        assert changed is False
        assert cli.model == "claude-opus-4-6"

    def test_provider_prefix_stripped(self):
        """openai/gpt-5.4 → gpt-5.4 (strip prefix, keep model)."""
        cli = _make_cli(model="openai/gpt-5.4")
        changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is True
        assert cli.model == "gpt-5.4"

    def test_any_provider_prefix_stripped(self):
        """anthropic/claude-opus-4.6 → claude-opus-4.6 (strip prefix only).
        User explicitly chose this — let the API decide if it works."""
        cli = _make_cli(model="anthropic/claude-opus-4.6")
        changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is True
        assert cli.model == "claude-opus-4.6"

    def test_opencode_go_prefix_stripped(self):
        # Prefix stripping must not disturb an already-set api_mode.
        cli = _make_cli(model="opencode-go/kimi-k2.5")
        cli.api_mode = "chat_completions"
        changed = cli._normalize_model_for_provider("opencode-go")
        assert changed is True
        assert cli.model == "kimi-k2.5"
        assert cli.api_mode == "chat_completions"

    def test_opencode_zen_claude_sets_messages_mode(self):
        # Claude models behind opencode-zen switch to the Anthropic messages API.
        cli = _make_cli(model="opencode-zen/claude-sonnet-4-6")
        cli.api_mode = "chat_completions"
        changed = cli._normalize_model_for_provider("opencode-zen")
        assert changed is True
        assert cli.model == "claude-sonnet-4-6"
        assert cli.api_mode == "anthropic_messages"

    def test_default_model_replaced(self):
        """No model configured (empty default) gets swapped for codex."""
        import cli as _cli_mod
        _clean_config = {
            "model": {
                "default": "",
                "base_url": "",
                "provider": "auto",
            },
            "display": {"compact": False, "tool_progress": "all", "resume_display": "full"},
            "agent": {},
            "terminal": {"env_type": "local"},
        }
        # Don't pass model= so _model_is_default is True
        with (
            patch("cli.get_tool_definitions", return_value=[]),
            patch.dict("os.environ", {"LLM_MODEL": "", "HERMES_MAX_ITERATIONS": ""}, clear=False),
            patch.dict(_cli_mod.__dict__, {"CLI_CONFIG": _clean_config}),
        ):
            from cli import HermesCLI
            cli = HermesCLI()

        assert cli._model_is_default is True
        with patch(
            "hermes_cli.codex_models.get_codex_model_ids",
            return_value=["gpt-5.3-codex", "gpt-5.4"],
        ):
            changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is True
        # Uses first from available list
        assert cli.model == "gpt-5.3-codex"

    def test_default_fallback_when_api_fails(self):
        """No model configured falls back to gpt-5.3-codex when API unreachable."""
        import cli as _cli_mod
        _clean_config = {
            "model": {
                "default": "",
                "base_url": "",
                "provider": "auto",
            },
            "display": {"compact": False, "tool_progress": "all", "resume_display": "full"},
            "agent": {},
            "terminal": {"env_type": "local"},
        }
        with (
            patch("cli.get_tool_definitions", return_value=[]),
            patch.dict("os.environ", {"LLM_MODEL": "", "HERMES_MAX_ITERATIONS": ""}, clear=False),
            patch.dict(_cli_mod.__dict__, {"CLI_CONFIG": _clean_config}),
        ):
            from cli import HermesCLI
            cli = HermesCLI()

        # Model-list lookup raising must not crash normalization; hardcoded
        # fallback slug is used instead.
        with patch(
            "hermes_cli.codex_models.get_codex_model_ids",
            side_effect=Exception("offline"),
        ):
            changed = cli._normalize_model_for_provider("openai-codex")
        assert changed is True
        assert cli.model == "gpt-5.3-codex"