# test_model_switch_copilot_api_mode.py
"""Regression tests for Copilot ``api_mode`` recomputation during /model switch.

Switching models inside the Copilot provider (e.g. GPT-5 → Claude) must not
reuse the stale ``api_mode`` returned by ``resolve_runtime_provider``; the
value has to be derived from the *new* model. Before the fix, Claude requests
were routed through the Responses API and failed with
``unsupported_api_for_model``.
"""

from unittest.mock import patch

from hermes_cli.model_switch import switch_model


# Canned "model accepted" verdict returned by the mocked validator.
_VALIDATION_OK = dict(
    accepted=True,
    persist=True,
    recognized=True,
    message=None,
)


def _run_copilot_switch(
    raw_input: str,
    current_provider: str = "copilot",
    current_model: str = "gpt-5.4",
    explicit_provider: str = "",
    runtime_api_mode: str = "codex_responses",
):
    """Invoke ``switch_model`` under a full set of Copilot mocks.

    ``runtime_api_mode`` controls the (possibly stale) api_mode reported by
    the mocked ``resolve_runtime_provider``; the tests assert that the switch
    result overrides it with a value computed from the target model.
    """
    # Runtime payload handed back by the mocked provider resolver; its
    # api_mode is deliberately independent of the model being switched to.
    runtime = {
        "api_key": "ghu_test_token",
        "base_url": "https://api.githubcopilot.com",
        "api_mode": runtime_api_mode,
    }
    with (
        patch("hermes_cli.model_switch.resolve_alias", return_value=None),
        patch("hermes_cli.model_switch.list_provider_models", return_value=[]),
        patch(
            "hermes_cli.runtime_provider.resolve_runtime_provider",
            return_value=runtime,
        ),
        patch(
            "hermes_cli.models.validate_requested_model",
            return_value=_VALIDATION_OK,
        ),
        patch("hermes_cli.model_switch.get_model_info", return_value=None),
        patch("hermes_cli.model_switch.get_model_capabilities", return_value=None),
        patch("hermes_cli.models.detect_provider_for_model", return_value=None),
    ):
        return switch_model(
            raw_input=raw_input,
            current_provider=current_provider,
            current_model=current_model,
            explicit_provider=explicit_provider,
        )


def test_same_provider_copilot_switch_recomputes_api_mode():
    """Within copilot, GPT-5 → Claude must recompute api_mode to chat_completions."""
    result = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="copilot",
        current_model="gpt-5.4",
    )

    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.new_model == "claude-opus-4.6"
    assert result.target_provider == "copilot"
    assert result.api_mode == "chat_completions"


def test_explicit_copilot_switch_uses_selected_model_api_mode():
    """Cross-provider switch into copilot derives api_mode from the new model,
    not from the stale runtime value."""
    result = _run_copilot_switch(
        raw_input="claude-opus-4.6",
        current_provider="openrouter",
        current_model="anthropic/claude-sonnet-4.6",
        explicit_provider="copilot",
    )

    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.new_model == "claude-opus-4.6"
    assert result.target_provider == "github-copilot"
    assert result.api_mode == "chat_completions"


def test_copilot_gpt5_keeps_codex_responses():
    """Within copilot, a GPT-5 → GPT-5 switch must leave api_mode untouched."""
    result = _run_copilot_switch(
        raw_input="gpt-5.4-mini",
        current_provider="copilot",
        current_model="gpt-5.4",
        runtime_api_mode="codex_responses",
    )

    assert result.success, f"switch_model failed: {result.error_message}"
    assert result.new_model == "gpt-5.4-mini"
    assert result.target_provider == "copilot"
    # gpt-5.4-mini is a GPT-5 variant — should use codex_responses
    # (gpt-5-mini is the special case that uses chat_completions)
    assert result.api_mode == "codex_responses"