# tests/cli/test_cli_provider_resolution.py
  1  import importlib
  2  import sys
  3  import types
  4  from contextlib import nullcontext
  5  from types import SimpleNamespace
  6  
  7  import pytest
  8  
  9  from hermes_cli.auth import AuthError
 10  from hermes_cli import main as hermes_main
 11  
 12  
 13  # ---------------------------------------------------------------------------
 14  # Module isolation: _import_cli() wipes tools.* / cli / run_agent from
 15  # sys.modules so it can re-import cli fresh.  Without cleanup the wiped
 16  # modules leak into subsequent tests on the same xdist worker, breaking
 17  # mock patches that target "tools.file_tools._get_file_ops" etc.
 18  # ---------------------------------------------------------------------------
 19  
 20  def _reset_modules(prefixes: tuple[str, ...]):
 21      for name in list(sys.modules):
 22          if any(name == p or name.startswith(p + ".") for p in prefixes):
 23              sys.modules.pop(name, None)
 24  
 25  
 26  @pytest.fixture(autouse=True)
 27  def _restore_cli_and_tool_modules():
 28      """Save and restore tools/cli/run_agent modules around every test."""
 29      prefixes = ("tools", "cli", "run_agent")
 30      original_modules = {
 31          name: module
 32          for name, module in sys.modules.items()
 33          if any(name == p or name.startswith(p + ".") for p in prefixes)
 34      }
 35      try:
 36          yield
 37      finally:
 38          _reset_modules(prefixes)
 39          sys.modules.update(original_modules)
 40  
 41  
 42  def _install_prompt_toolkit_stubs():
 43      class _Dummy:
 44          def __init__(self, *args, **kwargs):
 45              pass
 46  
 47      class _Condition:
 48          def __init__(self, func):
 49              self.func = func
 50  
 51          def __bool__(self):
 52              return bool(self.func())
 53  
 54      class _ANSI(str):
 55          pass
 56  
 57      root = types.ModuleType("prompt_toolkit")
 58      history = types.ModuleType("prompt_toolkit.history")
 59      styles = types.ModuleType("prompt_toolkit.styles")
 60      patch_stdout = types.ModuleType("prompt_toolkit.patch_stdout")
 61      application = types.ModuleType("prompt_toolkit.application")
 62      layout = types.ModuleType("prompt_toolkit.layout")
 63      processors = types.ModuleType("prompt_toolkit.layout.processors")
 64      filters = types.ModuleType("prompt_toolkit.filters")
 65      dimension = types.ModuleType("prompt_toolkit.layout.dimension")
 66      menus = types.ModuleType("prompt_toolkit.layout.menus")
 67      widgets = types.ModuleType("prompt_toolkit.widgets")
 68      key_binding = types.ModuleType("prompt_toolkit.key_binding")
 69      completion = types.ModuleType("prompt_toolkit.completion")
 70      formatted_text = types.ModuleType("prompt_toolkit.formatted_text")
 71  
 72      history.FileHistory = _Dummy
 73      styles.Style = _Dummy
 74      patch_stdout.patch_stdout = lambda *args, **kwargs: nullcontext()
 75      application.Application = _Dummy
 76      layout.Layout = _Dummy
 77      layout.HSplit = _Dummy
 78      layout.Window = _Dummy
 79      layout.FormattedTextControl = _Dummy
 80      layout.ConditionalContainer = _Dummy
 81      processors.Processor = _Dummy
 82      processors.Transformation = _Dummy
 83      processors.PasswordProcessor = _Dummy
 84      processors.ConditionalProcessor = _Dummy
 85      filters.Condition = _Condition
 86      dimension.Dimension = _Dummy
 87      menus.CompletionsMenu = _Dummy
 88      widgets.TextArea = _Dummy
 89      key_binding.KeyBindings = _Dummy
 90      completion.Completer = _Dummy
 91      completion.Completion = _Dummy
 92      formatted_text.ANSI = _ANSI
 93      root.print_formatted_text = lambda *args, **kwargs: None
 94  
 95      sys.modules.setdefault("prompt_toolkit", root)
 96      sys.modules.setdefault("prompt_toolkit.history", history)
 97      sys.modules.setdefault("prompt_toolkit.styles", styles)
 98      sys.modules.setdefault("prompt_toolkit.patch_stdout", patch_stdout)
 99      sys.modules.setdefault("prompt_toolkit.application", application)
100      sys.modules.setdefault("prompt_toolkit.layout", layout)
101      sys.modules.setdefault("prompt_toolkit.layout.processors", processors)
102      sys.modules.setdefault("prompt_toolkit.filters", filters)
103      sys.modules.setdefault("prompt_toolkit.layout.dimension", dimension)
104      sys.modules.setdefault("prompt_toolkit.layout.menus", menus)
105      sys.modules.setdefault("prompt_toolkit.widgets", widgets)
106      sys.modules.setdefault("prompt_toolkit.key_binding", key_binding)
107      sys.modules.setdefault("prompt_toolkit.completion", completion)
108      sys.modules.setdefault("prompt_toolkit.formatted_text", formatted_text)
109  
110  
def _import_cli():
    """Import a fresh copy of the top-level ``cli`` module.

    Wipes cli / run_agent / tools (and their submodules) from sys.modules so
    the import starts clean; the autouse fixture restores the originals
    afterwards.  Returns the freshly imported ``cli`` module.
    """
    # Reuse the fixture's prefix matching instead of an inline loop.  The old
    # inline version only matched "tools." submodules and missed nested
    # "cli."/"run_agent." entries that the fixture explicitly snapshots.
    _reset_modules(("cli", "run_agent", "tools"))

    # cli imports firecrawl at module scope; stub it when not installed.
    if "firecrawl" not in sys.modules:
        sys.modules["firecrawl"] = types.SimpleNamespace(Firecrawl=object)

    # Fall back to stub modules when prompt_toolkit is not available.
    try:
        importlib.import_module("prompt_toolkit")
    except ModuleNotFoundError:
        _install_prompt_toolkit_stubs()
    return importlib.import_module("cli")
124  
125  
def test_hermes_cli_init_does_not_eagerly_resolve_runtime_provider(monkeypatch):
    """HermesCLI.__init__ must defer runtime-provider resolution to first use."""
    cli = _import_cli()
    resolve_calls = []

    def _fail_if_resolved(**kwargs):
        resolve_calls.append(kwargs)
        raise AssertionError("resolve_runtime_provider should not be called in HermesCLI.__init__")

    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _fail_if_resolved)

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)

    assert shell is not None
    # Constructing the shell must not have touched the resolver at all.
    assert not resolve_calls
141  
142  
def test_runtime_resolution_failure_is_not_sticky(monkeypatch):
    """A failed _init_agent attempt must not prevent a later retry from succeeding."""
    cli = _import_cli()
    attempts = []

    def _flaky_resolve(**kwargs):
        attempts.append(kwargs)
        if len(attempts) == 1:
            # Simulate a transient failure on the very first resolution.
            raise RuntimeError("temporary auth failure")
        return dict(
            provider="openrouter",
            api_mode="chat_completions",
            base_url="https://openrouter.ai/api/v1",
            api_key="test-key",
            source="env/config",
        )

    class _RecordingAgent:
        def __init__(self, *args, **kwargs):
            self.kwargs = kwargs

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _flaky_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    monkeypatch.setattr(cli, "AIAgent", _RecordingAgent)

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)

    # First attempt hits the transient failure; the retry succeeds.
    assert shell._init_agent() is False
    assert shell._init_agent() is True
    assert len(attempts) == 2
    assert shell.agent is not None
173  
174  
def test_runtime_resolution_rebuilds_agent_on_routing_change(monkeypatch):
    """A provider/api_mode change must drop the cached agent even on same URL/key."""
    cli = _import_cli()

    codex_runtime = {
        "provider": "openai-codex",
        "api_mode": "codex_responses",
        "base_url": "https://same-endpoint.example/v1",
        "api_key": "same-key",
        "source": "env/config",
    }
    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: dict(codex_runtime),
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)
    # Simulate an agent that was built while routed through openrouter at the
    # very same endpoint and key — only provider/api_mode differ.
    shell.provider = "openrouter"
    shell.api_mode = "chat_completions"
    shell.base_url = "https://same-endpoint.example/v1"
    shell.api_key = "same-key"
    shell.agent = object()

    assert shell._ensure_runtime_credentials() is True
    # Stale agent must be invalidated and routing fields updated.
    assert shell.agent is None
    assert shell.provider == "openai-codex"
    assert shell.api_mode == "codex_responses"
201  
202  
def test_cli_turn_routing_uses_primary_when_disabled(monkeypatch):
    """Without per-turn routing, the primary provider config is used verbatim."""
    cli = _import_cli()
    shell = cli.HermesCLI(model="gpt-5", compact=True, max_turns=1)
    shell.provider = "openrouter"
    shell.api_mode = "chat_completions"
    shell.base_url = "https://openrouter.ai/api/v1"
    shell.api_key = "sk-primary"

    turn_config = shell._resolve_turn_agent_config("what time is it in tokyo?")

    assert turn_config["model"] == "gpt-5"
    assert turn_config["runtime"]["provider"] == "openrouter"
215  
216  
def test_cli_prefers_config_provider_over_stale_env_override(monkeypatch):
    """config.yaml's provider wins over a stale HERMES_INFERENCE_PROVIDER env var."""
    cli = _import_cli()

    monkeypatch.setenv("HERMES_INFERENCE_PROVIDER", "openrouter")
    # Clone CLI_CONFIG and point its model section at a custom provider.
    patched_config = dict(cli.CLI_CONFIG)
    patched_config["model"] = {
        **dict(patched_config.get("model", {})),
        "provider": "custom",
        "base_url": "https://api.fireworks.ai/inference/v1",
    }
    monkeypatch.setattr(cli, "CLI_CONFIG", patched_config)

    shell = cli.HermesCLI(model="fireworks/minimax-m2p5", compact=True, max_turns=1)

    assert shell.requested_provider == "custom"
231  
232  
def test_codex_provider_replaces_incompatible_default_model(monkeypatch):
    """When provider resolves to openai-codex and no model was explicitly
    chosen, the global config default (e.g. anthropic/claude-opus-4.6) must
    be replaced with a Codex-compatible model.  Fixes #651."""
    cli = _import_cli()

    for env_var in ("LLM_MODEL", "OPENAI_MODEL"):
        monkeypatch.delenv(env_var, raising=False)
    # Ensure local user config does not leak a model into the test
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "",
        "base_url": "https://openrouter.ai/api/v1",
    })

    codex_runtime = {
        "provider": "openai-codex",
        "api_mode": "codex_responses",
        "base_url": "https://chatgpt.com/backend-api/codex",
        "api_key": "test-key",
        "source": "env/config",
    }
    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: dict(codex_runtime),
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.2-codex", "gpt-5.1-codex-mini"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    assert shell._model_is_default is True
    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # Codex routing must discard any Anthropic-flavoured default...
    assert "anthropic" not in shell.model
    assert "claude" not in shell.model
    # ...and adopt the first Codex model reported by the API.
    assert shell.model == "gpt-5.2-codex"
271  
272  
def test_model_flow_nous_prints_subscription_guidance_without_mutating_explicit_tts(monkeypatch, capsys):
    """The Nous flow prints guidance but leaves explicit tts/browser choices alone."""
    monkeypatch.setattr("hermes_cli.nous_subscription.managed_nous_tools_enabled", lambda: True)
    config = {
        "model": {"provider": "nous", "default": "claude-opus-4-6"},
        "tts": {"provider": "elevenlabs"},
        "browser": {"cloud_provider": "browser-use"},
    }

    # Stub every auth touchpoint the flow exercises (target -> replacement).
    nous_auth_stubs = {
        "hermes_cli.auth.get_provider_auth_state": lambda provider: {"access_token": "nous-token"},
        "hermes_cli.auth.resolve_nous_runtime_credentials": lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "nous-key",
        },
        "hermes_cli.auth.fetch_nous_models": lambda *args, **kwargs: ["claude-opus-4-6"],
        "hermes_cli.auth._prompt_model_selection": lambda model_ids, current_model="", pricing=None, **kw: "claude-opus-4-6",
        "hermes_cli.auth._save_model_choice": lambda model: None,
        "hermes_cli.auth._update_config_for_provider": lambda provider, url: None,
    }
    for target, stub in nous_auth_stubs.items():
        monkeypatch.setattr(target, stub)

    hermes_main._model_flow_nous(config, current_model="claude-opus-4-6")

    out = capsys.readouterr().out
    assert "Default model set to:" in out
    # Explicit user choices must survive the flow untouched.
    assert config["tts"]["provider"] == "elevenlabs"
    assert config["browser"]["cloud_provider"] == "browser-use"
306  
307  
def test_model_flow_nous_offers_tool_gateway_prompt_when_unconfigured(monkeypatch, capsys):
    """With no browser config present, the Nous flow should offer the Tool Gateway."""
    monkeypatch.setattr("hermes_cli.nous_subscription.managed_nous_tools_enabled", lambda: True)
    config = {
        "model": {"provider": "nous", "default": "claude-opus-4-6"},
        "tts": {"provider": "edge"},
    }

    # Stub hermes_cli.auth attributes the flow calls (attribute -> replacement).
    stubbed_auth = {
        "get_provider_auth_state": lambda provider: {"access_token": "***"},
        "resolve_nous_runtime_credentials": lambda *args, **kwargs: {
            "base_url": "https://inference.example.com/v1",
            "api_key": "***",
        },
        "fetch_nous_models": lambda *args, **kwargs: ["claude-opus-4-6"],
        "_prompt_model_selection": lambda model_ids, current_model="", pricing=None, **kw: "claude-opus-4-6",
        "_save_model_choice": lambda model: None,
        "_update_config_for_provider": lambda provider, url: None,
    }
    for attr_name, stub in stubbed_auth.items():
        monkeypatch.setattr(f"hermes_cli.auth.{attr_name}", stub)

    hermes_main._model_flow_nous(config, current_model="claude-opus-4-6")

    out = capsys.readouterr().out
    # Tool Gateway prompt should be shown (input() raises OSError in pytest
    # which is caught, so the prompt text appears but nothing is applied)
    assert "Tool Gateway" in out
339  
340  
def test_codex_provider_uses_config_model(monkeypatch):
    """Model comes from config.yaml, not LLM_MODEL env var.
    Config.yaml is the single source of truth to avoid multi-agent conflicts."""
    cli = _import_cli()

    # LLM_MODEL env var should be IGNORED (even if set)
    monkeypatch.setenv("LLM_MODEL", "should-be-ignored")
    monkeypatch.delenv("OPENAI_MODEL", raising=False)

    # Set model via config
    codex_base_url = "https://chatgpt.com/backend-api/codex"
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "gpt-5.2-codex",
        "provider": "openai-codex",
        "base_url": codex_base_url,
    })

    def _fake_resolve(**kwargs):
        return {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": codex_base_url,
            "api_key": "fake-codex-token",
            "source": "env/config",
        }

    monkeypatch.setattr("hermes_cli.runtime_provider.resolve_runtime_provider", _fake_resolve)
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    # Prevent live API call from overriding the config model
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.2-codex"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # Model from config (may be normalized by codex provider logic)
    assert "codex" in shell.model.lower()
    # LLM_MODEL env var is NOT used
    assert shell.model != "should-be-ignored"
382  
383  
def test_codex_config_model_not_replaced_by_normalization(monkeypatch):
    """When the user sets model.default in config.yaml to a specific codex
    model, _normalize_model_for_provider must NOT replace it with the latest
    available model from the API.  Regression test for #1887."""
    cli = _import_cli()

    for env_var in ("LLM_MODEL", "OPENAI_MODEL"):
        monkeypatch.delenv(env_var, raising=False)

    # User explicitly configured gpt-5.3-codex in config.yaml
    monkeypatch.setitem(cli.CLI_CONFIG, "model", {
        "default": "gpt-5.3-codex",
        "provider": "openai-codex",
        "base_url": "https://chatgpt.com/backend-api/codex",
    })

    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "fake-key",
            "source": "env/config",
        },
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))
    # API returns a DIFFERENT model than what the user configured
    monkeypatch.setattr(
        "hermes_cli.codex_models.get_codex_model_ids",
        lambda access_token=None: ["gpt-5.4", "gpt-5.3-codex"],
    )

    shell = cli.HermesCLI(compact=True, max_turns=1)

    # Config model is NOT the global default — user made a deliberate choice
    assert shell._model_is_default is False
    assert shell._ensure_runtime_credentials() is True
    assert shell.provider == "openai-codex"
    # Model must stay as user configured, not replaced by gpt-5.4
    assert shell.model == "gpt-5.3-codex"
425  
426  
def test_codex_provider_preserves_explicit_codex_model(monkeypatch):
    """If the user explicitly passes a Codex-compatible model, it must be
    preserved even when the provider resolves to openai-codex."""
    cli = _import_cli()

    for env_var in ("LLM_MODEL", "OPENAI_MODEL"):
        monkeypatch.delenv(env_var, raising=False)

    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "test-key",
            "source": "env/config",
        },
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    explicit_model = "gpt-5.1-codex-mini"
    shell = cli.HermesCLI(model=explicit_model, compact=True, max_turns=1)

    # An explicit model is not the default and must survive credential setup.
    assert shell._model_is_default is False
    assert shell._ensure_runtime_credentials() is True
    assert shell.model == explicit_model
452  
453  
def test_codex_provider_strips_provider_prefix_from_model(monkeypatch):
    """openai/gpt-5.3-codex should become gpt-5.3-codex — the Codex
    Responses API does not accept provider-prefixed model slugs."""
    cli = _import_cli()

    for env_var in ("LLM_MODEL", "OPENAI_MODEL"):
        monkeypatch.delenv(env_var, raising=False)

    monkeypatch.setattr(
        "hermes_cli.runtime_provider.resolve_runtime_provider",
        lambda **kwargs: {
            "provider": "openai-codex",
            "api_mode": "codex_responses",
            "base_url": "https://chatgpt.com/backend-api/codex",
            "api_key": "test-key",
            "source": "env/config",
        },
    )
    monkeypatch.setattr("hermes_cli.runtime_provider.format_runtime_provider_error", lambda exc: str(exc))

    shell = cli.HermesCLI(model="openai/gpt-5.3-codex", compact=True, max_turns=1)

    assert shell._ensure_runtime_credentials() is True
    # The "openai/" vendor prefix must be stripped.
    assert shell.model == "gpt-5.3-codex"
478  
479  
def test_cmd_model_falls_back_to_auto_on_invalid_provider(monkeypatch, capsys):
    """An unknown configured provider triggers a warning plus auto detection."""
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "gpt-5", "provider": "invalid-provider"}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)
    monkeypatch.setattr("hermes_cli.config.get_env_value", lambda key: "")
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: None)

    def _resolve_provider(requested, **kwargs):
        if requested != "invalid-provider":
            return "openrouter"
        raise AuthError("Unknown provider 'invalid-provider'.", code="invalid_provider")

    monkeypatch.setattr("hermes_cli.auth.resolve_provider", _resolve_provider)
    monkeypatch.setattr(hermes_main, "_prompt_provider_choice", lambda choices, **kwargs: len(choices) - 1)
    # Pretend stdin is a TTY so the interactive branch is taken.
    fake_tty = type("FakeTTY", (), {"isatty": lambda self: True})()
    monkeypatch.setattr("sys.stdin", fake_tty)

    hermes_main.cmd_model(SimpleNamespace())
    output = capsys.readouterr().out

    assert "Warning:" in output
    assert "falling back to auto provider detection" in output.lower()
    assert "No change." in output
504  
505  
def test_model_flow_custom_saves_verified_v1_base_url(monkeypatch, capsys):
    """The custom-provider flow probes the entered URL, keeps the working /v1
    base URL it discovered, and saves the detected model — without writing
    OPENAI_BASE_URL to .env (config.yaml is authoritative)."""
    # No pre-existing env values for the flow to pick up.  (The previous
    # lambda's key check returned "" on both branches — collapsed to a
    # constant.)
    monkeypatch.setattr("hermes_cli.config.get_env_value", lambda key: "")
    saved_env = {}
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: saved_env.__setitem__(key, value))
    monkeypatch.setattr("hermes_cli.auth._save_model_choice", lambda model: saved_env.__setitem__("MODEL", model))
    monkeypatch.setattr("hermes_cli.auth.deactivate_provider", lambda: None)
    monkeypatch.setattr("hermes_cli.main._save_custom_provider", lambda *args, **kwargs: None)
    # Probe stub: the user's URL lacked /v1, so the probe fell back to the
    # /v1-suffixed endpoint and found a single model there.
    monkeypatch.setattr(
        "hermes_cli.models.probe_api_models",
        lambda api_key, base_url: {
            "models": ["llm"],
            "probed_url": "http://localhost:8000/v1/models",
            "resolved_base_url": "http://localhost:8000/v1",
            "suggested_base_url": "http://localhost:8000/v1",
            "used_fallback": True,
        },
    )
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "", "provider": "custom", "base_url": ""}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)

    # After the probe detects a single model ("llm"), the flow asks
    # "Use this model? [Y/n]:" — confirm with Enter, then context length,
    # then display name.  input() and getpass() draw from ONE shared
    # iterator, so the order below mirrors the flow's prompt order exactly.
    answers = iter(["http://localhost:8000", "local-key", "", "", "", ""])
    monkeypatch.setattr("builtins.input", lambda _prompt="": next(answers))
    monkeypatch.setattr("getpass.getpass", lambda _prompt="": next(answers))

    hermes_main._model_flow_custom({})
    output = capsys.readouterr().out

    assert "Saving the working base URL instead" in output
    assert "Detected model: llm" in output
    # OPENAI_BASE_URL is no longer saved to .env — config.yaml is authoritative
    assert "OPENAI_BASE_URL" not in saved_env
    assert saved_env["MODEL"] == "llm"
547  
548  
def test_cmd_model_forwards_nous_login_tls_options(monkeypatch):
    """TLS/OAuth CLI flags must reach _login_nous unchanged."""
    monkeypatch.setattr(hermes_main, "_require_tty", lambda *a: None)
    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: {"model": {"default": "gpt-5", "provider": "nous"}},
    )
    monkeypatch.setattr("hermes_cli.config.save_config", lambda cfg: None)
    monkeypatch.setattr("hermes_cli.config.get_env_value", lambda key: "")
    monkeypatch.setattr("hermes_cli.config.save_env_value", lambda key, value: None)
    monkeypatch.setattr("hermes_cli.auth.resolve_provider", lambda requested, **kwargs: "nous")
    monkeypatch.setattr("hermes_cli.auth.get_provider_auth_state", lambda provider_id: None)
    monkeypatch.setattr(hermes_main, "_prompt_provider_choice", lambda choices, **kwargs: 0)

    forwarded_fields = (
        "portal_url",
        "inference_url",
        "client_id",
        "scope",
        "no_browser",
        "timeout",
        "ca_bundle",
        "insecure",
    )
    captured = {}

    def _fake_login(login_args, provider_config):
        # Record exactly what cmd_model forwarded, field by field.
        for field in forwarded_fields:
            captured[field] = getattr(login_args, field)

    monkeypatch.setattr("hermes_cli.auth._login_nous", _fake_login)

    cli_args = SimpleNamespace(
        portal_url="https://portal.nousresearch.com",
        inference_url="https://inference.nousresearch.com/v1",
        client_id="hermes-local",
        scope="openid profile",
        no_browser=True,
        timeout=7.5,
        ca_bundle="/tmp/local-ca.pem",
        insecure=True,
    )
    hermes_main.cmd_model(cli_args)

    assert captured == {
        "portal_url": "https://portal.nousresearch.com",
        "inference_url": "https://inference.nousresearch.com/v1",
        "client_id": "hermes-local",
        "scope": "openid profile",
        "no_browser": True,
        "timeout": 7.5,
        "ca_bundle": "/tmp/local-ca.pem",
        "insecure": True,
    }
599  
600  
601  # ---------------------------------------------------------------------------
602  # _auto_provider_name — unit tests
603  # ---------------------------------------------------------------------------
604  
def test_auto_provider_name_localhost():
    """Loopback hosts are labelled 'Local' with their host:port."""
    from hermes_cli.main import _auto_provider_name
    expected = {
        "http://localhost:11434/v1": "Local (localhost:11434)",
        "http://127.0.0.1:1234/v1": "Local (127.0.0.1:1234)",
    }
    for url, label in expected.items():
        assert _auto_provider_name(url) == label
609  
610  
def test_auto_provider_name_runpod():
    """RunPod-hosted endpoints are recognised by their domain."""
    from hermes_cli.main import _auto_provider_name
    label = _auto_provider_name("https://xyz.runpod.io/v1")
    assert "RunPod" in label
614  
615  
def test_auto_provider_name_remote():
    """Unknown remote hosts fall back to a capitalised hostname."""
    from hermes_cli.main import _auto_provider_name
    assert _auto_provider_name("https://api.together.xyz/v1") == "Api.together.xyz"
620  
621  
def test_save_custom_provider_uses_provided_name(monkeypatch, tmp_path):
    """When a display name is passed, it should appear in the saved entry."""
    import yaml
    from hermes_cli.main import _save_custom_provider

    cfg_path = tmp_path / "config.yaml"
    cfg_path.write_text(yaml.dump({}))

    monkeypatch.setattr(
        "hermes_cli.config.load_config",
        lambda: yaml.safe_load(cfg_path.read_text()) or {},
    )
    # Capture what gets written back; the bound update method is the saver.
    saved_config = {}
    monkeypatch.setattr("hermes_cli.config.save_config", saved_config.update)

    _save_custom_provider("http://localhost:11434/v1", name="Ollama")

    entries = saved_config.get("custom_providers", [])
    assert len(entries) == 1
    assert entries[0]["name"] == "Ollama"