# tests/hermes_cli/test_timeouts.py
  1  from __future__ import annotations
  2  
  3  import textwrap
  4  
  5  from hermes_cli.timeouts import (
  6      get_provider_request_timeout,
  7      get_provider_stale_timeout,
  8  )
  9  
 10  
 11  def _write_config(tmp_path, body: str) -> None:
 12      (tmp_path / "config.yaml").write_text(textwrap.dedent(body), encoding="utf-8")
 13  
 14  
 15  def test_model_timeout_override_wins(monkeypatch, tmp_path):
 16      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 17      _write_config(
 18          tmp_path,
 19          """\
 20          providers:
 21            anthropic:
 22              request_timeout_seconds: 30
 23              models:
 24                claude-opus-4.6:
 25                  timeout_seconds: 120
 26          """,
 27      )
 28  
 29      assert get_provider_request_timeout("anthropic", "claude-opus-4.6") == 120.0
 30  
 31  
 32  def test_provider_timeout_used_when_no_model_override(monkeypatch, tmp_path):
 33      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 34      _write_config(
 35          tmp_path,
 36          """\
 37          providers:
 38            ollama-local:
 39              request_timeout_seconds: 300
 40          """,
 41      )
 42  
 43      assert get_provider_request_timeout("ollama-local", "qwen3:32b") == 300.0
 44  
 45  
 46  def test_model_stale_timeout_override_wins(monkeypatch, tmp_path):
 47      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 48      _write_config(
 49          tmp_path,
 50          """\
 51          providers:
 52            openai-codex:
 53              stale_timeout_seconds: 600
 54              models:
 55                gpt-5.4:
 56                  stale_timeout_seconds: 1800
 57          """,
 58      )
 59  
 60      assert get_provider_stale_timeout("openai-codex", "gpt-5.4") == 1800.0
 61  
 62  
 63  def test_provider_stale_timeout_used_when_no_model_override(monkeypatch, tmp_path):
 64      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 65      _write_config(
 66          tmp_path,
 67          """\
 68          providers:
 69            openai-codex:
 70              stale_timeout_seconds: 900
 71          """,
 72      )
 73  
 74      assert get_provider_stale_timeout("openai-codex", "gpt-5.4") == 900.0
 75  
 76  
 77  def test_missing_timeout_returns_none(monkeypatch, tmp_path):
 78      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 79      _write_config(
 80          tmp_path,
 81          """\
 82          providers:
 83            anthropic:
 84              models:
 85                claude-opus-4.6:
 86                  context_length: 200000
 87          """,
 88      )
 89  
 90      assert get_provider_request_timeout("anthropic", "claude-opus-4.6") is None
 91      assert get_provider_request_timeout("missing-provider", "claude-opus-4.6") is None
 92  
 93  
 94  def test_invalid_timeout_values_return_none(monkeypatch, tmp_path):
 95      monkeypatch.setenv("HERMES_HOME", str(tmp_path))
 96      _write_config(
 97          tmp_path,
 98          """\
 99          providers:
100            anthropic:
101              request_timeout_seconds: "fast"
102              models:
103                claude-opus-4.6:
104                  timeout_seconds: -5
105            ollama-local:
106              request_timeout_seconds: -1
107          """,
108      )
109  
110      assert get_provider_request_timeout("anthropic", "claude-opus-4.6") is None
111      assert get_provider_request_timeout("anthropic", "claude-sonnet-4.5") is None
112      assert get_provider_request_timeout("ollama-local") is None
113  
114  
def test_invalid_stale_timeout_values_return_none(monkeypatch, tmp_path):
    """Non-numeric and negative stale-timeout values resolve to None."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    body = """\
        providers:
          openai-codex:
            stale_timeout_seconds: "slow"
            models:
              gpt-5.4:
                stale_timeout_seconds: -1
        """
    _write_config(tmp_path, body)

    # Negative per-model override → None; invalid provider value also → None.
    assert get_provider_stale_timeout("openai-codex", "gpt-5.4") is None
    assert get_provider_stale_timeout("openai-codex", "gpt-5.5") is None
131  
132  
def test_anthropic_adapter_honors_timeout_kwarg():
    """build_anthropic_client(timeout=X) overrides the 900s default read timeout.

    Skipped entirely when the optional ``anthropic`` SDK is not installed.
    """
    # Local import: this module otherwise only needs pytest via fixtures, so
    # keep the dependency scoped to the one test that calls pytest directly.
    # (Was `pytest = __import__("pytest")` — a needlessly dynamic import.)
    import pytest

    pytest.importorskip("anthropic")  # skip if optional SDK missing
    from agent.anthropic_adapter import build_anthropic_client

    c_default = build_anthropic_client("sk-ant-dummy", None)
    c_custom = build_anthropic_client("sk-ant-dummy", None, timeout=45.0)
    c_invalid = build_anthropic_client("sk-ant-dummy", None, timeout=-1)

    # Default stays at 900s; custom overrides; invalid falls back to default
    assert c_default.timeout.read == 900.0
    assert c_custom.timeout.read == 45.0
    assert c_invalid.timeout.read == 900.0
    # Connect timeout always stays at 10s regardless
    assert c_default.timeout.connect == 10.0
    assert c_custom.timeout.connect == 10.0
150  
151  
def test_resolved_api_call_timeout_priority(monkeypatch, tmp_path):
    """AIAgent._resolved_api_call_timeout() honors config > env > default priority."""
    # Isolate HERMES_HOME so the agent reads only this test's config/.env.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")

    # Case A: config wins over env var
    _write_config(tmp_path, """\
        providers:
          openrouter:
            request_timeout_seconds: 77
            models:
              openai/gpt-4o-mini:
                timeout_seconds: 42
        """)
    # Env var is set but should lose to the YAML config above.
    monkeypatch.setenv("HERMES_API_TIMEOUT", "999")

    from run_agent import AIAgent
    agent = AIAgent(
        model="openai/gpt-4o-mini",
        provider="openrouter",
        api_key="sk-dummy",
        base_url="https://openrouter.ai/api/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    # Per-model override wins
    assert agent._resolved_api_call_timeout() == 42.0

    # Provider-level (different model, no per-model override)
    agent.model = "some/other-model"
    assert agent._resolved_api_call_timeout() == 77.0

    # Case B: no config → env wins
    _write_config(tmp_path, "")
    # Clear the cached config load. NOTE(review): reload order matters —
    # config before timeouts before run_agent, so each module rebinds the
    # freshly reloaded one below it; presumably the modules cache config at
    # import time — confirm against hermes_cli.config.
    import importlib
    from hermes_cli import config as cfg_mod
    importlib.reload(cfg_mod)
    from hermes_cli import timeouts as to_mod
    importlib.reload(to_mod)
    import run_agent as ra_mod
    importlib.reload(ra_mod)

    # Fresh agent from the reloaded module so it sees the empty config.
    agent2 = ra_mod.AIAgent(
        model="some/model",
        provider="openrouter",
        api_key="sk-dummy",
        base_url="https://openrouter.ai/api/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    assert agent2._resolved_api_call_timeout() == 999.0

    # Case C: no config, no env → 1800.0 default
    monkeypatch.delenv("HERMES_API_TIMEOUT", raising=False)
    assert agent2._resolved_api_call_timeout() == 1800.0
213  
214  
def test_resolved_api_call_stale_timeout_priority(monkeypatch, tmp_path):
    """AIAgent stale timeout honors config > env > default priority."""
    # Isolate HERMES_HOME so the agent reads only this test's config/.env.
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")

    # Config defines both a provider-level and a per-model stale timeout.
    _write_config(tmp_path, """\
        providers:
          openai-codex:
            stale_timeout_seconds: 600
            models:
              gpt-5.4:
                stale_timeout_seconds: 1800
        """)
    # Env var is set but should lose to the YAML config above.
    monkeypatch.setenv("HERMES_API_CALL_STALE_TIMEOUT", "999")

    from run_agent import AIAgent
    agent = AIAgent(
        model="gpt-5.4",
        provider="openai-codex",
        api_key="sk-dummy",
        base_url="https://chatgpt.com/backend-api/codex",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    # Per-model override wins. NOTE(review): second tuple element is presumably
    # an "is built-in default" flag — confirm against run_agent.
    assert agent._resolved_api_call_stale_timeout_base() == (1800.0, False)

    # Provider-level value applies when the model has no override.
    agent.model = "gpt-5.5"
    assert agent._resolved_api_call_stale_timeout_base() == (600.0, False)

    # No config → env wins. Reload order matters: config before timeouts
    # before run_agent, so each module rebinds the freshly reloaded one.
    _write_config(tmp_path, "")
    import importlib
    from hermes_cli import config as cfg_mod
    importlib.reload(cfg_mod)
    from hermes_cli import timeouts as to_mod
    importlib.reload(to_mod)
    import run_agent as ra_mod
    importlib.reload(ra_mod)

    # Fresh agent from the reloaded module so it sees the empty config.
    agent2 = ra_mod.AIAgent(
        model="gpt-5.4",
        provider="openai-codex",
        api_key="sk-dummy",
        base_url="https://chatgpt.com/backend-api/codex",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )
    assert agent2._resolved_api_call_stale_timeout_base() == (999.0, False)

    # No config, no env → (300.0, True): the default value with the
    # default-flag set (flag True only in this fallback case).
    monkeypatch.delenv("HERMES_API_CALL_STALE_TIMEOUT", raising=False)
    assert agent2._resolved_api_call_stale_timeout_base() == (300.0, True)
269  
270  
def test_default_non_stream_stale_timeout_auto_disables_for_local_endpoints(monkeypatch, tmp_path):
    """With no explicit stale timeout, a loopback base_url gets no stale limit."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")
    monkeypatch.delenv("HERMES_API_CALL_STALE_TIMEOUT", raising=False)

    from run_agent import AIAgent
    agent = AIAgent(
        model="qwen3:32b",
        provider="ollama-local",
        api_key="sk-dummy",
        base_url="http://127.0.0.1:11434/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )

    # Local endpoint + no explicit setting → stale timeout effectively disabled.
    computed = agent._compute_non_stream_stale_timeout([])
    assert computed == float("inf")
289  
290  
def test_explicit_non_stream_stale_timeout_is_honored_for_local_endpoints(monkeypatch, tmp_path):
    """An explicit env stale timeout is kept even for loopback endpoints."""
    monkeypatch.setenv("HERMES_HOME", str(tmp_path))
    (tmp_path / ".env").write_text("", encoding="utf-8")
    monkeypatch.setenv("HERMES_API_CALL_STALE_TIMEOUT", "300")

    from run_agent import AIAgent
    agent = AIAgent(
        model="qwen3:32b",
        provider="ollama-local",
        api_key="sk-dummy",
        base_url="http://127.0.0.1:11434/v1",
        quiet_mode=True,
        skip_context_files=True,
        skip_memory=True,
        platform="cli",
    )

    # Explicit setting wins over the local-endpoint auto-disable behavior.
    computed = agent._compute_non_stream_stale_timeout([])
    assert computed == 300.0