// slash-command-settings-model-handlers.test.ts
1 import { beforeEach, describe, expect, it, vi } from 'vitest' 2 3 import { 4 runModelSetSlashCommand, 5 runReasoningSetSlashCommand, 6 runSettingsDefaultsUpdateSlashCommand, 7 runThinkSetSlashCommand, 8 } from '@/server/chat/slash-command-settings-model-handlers' 9 import { getRuntimeConfig } from '@/server/config/runtime' 10 import { 11 buildSessionSettingsSummary, 12 getSettingsSnapshot, 13 pinSessionProviderToCurrentDefault, 14 resolveEffectiveProviderOverride, 15 updateSettings, 16 } from '@/server/settings/service' 17 import { getAppSettings } from '@/server/storage/app-settings' 18 import { 19 getSessionReasoningLevelOverride, 20 getSessionThinkingLevelOverride, 21 setSessionChatModelOverride, 22 setSessionReasoningLevelOverride, 23 setSessionThinkingLevelOverride, 24 } from '@/server/storage/chat-store' 25 26 vi.mock('@/server/config/runtime', () => ({ 27 getRuntimeConfig: vi.fn(), 28 })) 29 30 vi.mock('@/server/settings/service', () => ({ 31 buildSessionSettingsSummary: vi.fn(), 32 getSettingsSnapshot: vi.fn(), 33 pinSessionProviderToCurrentDefault: vi.fn(), 34 resolveEffectiveProviderOverride: vi.fn(), 35 updateSettings: vi.fn(), 36 })) 37 38 vi.mock('@/server/storage/app-settings', () => ({ 39 getAppSettings: vi.fn(), 40 })) 41 42 vi.mock('@/server/storage/chat-store', () => ({ 43 getSessionChatModelOverride: vi.fn(), 44 getSessionReasoningLevelOverride: vi.fn(), 45 getSessionThinkingLevelOverride: vi.fn(), 46 setSessionChatModelOverride: vi.fn(), 47 setSessionReasoningLevelOverride: vi.fn(), 48 setSessionThinkingLevelOverride: vi.fn(), 49 })) 50 51 describe('slash-command-settings-model-handlers', () => { 52 beforeEach(() => { 53 vi.clearAllMocks() 54 vi.mocked(getRuntimeConfig).mockReturnValue({ 55 llmBaseUrl: 'https://default.example.com', 56 llmApiKey: 'env-key', 57 } as never) 58 vi.mocked(buildSessionSettingsSummary).mockReturnValue({ 59 activeChatModel: 'gpt-5-mini', 60 } as never) 61 vi.mocked(getSettingsSnapshot).mockReturnValue({ 62 defaults: 
{}, 63 session: {}, 64 } as never) 65 vi.mocked(resolveEffectiveProviderOverride).mockReturnValue({ 66 baseUrl: 'https://effective.example.com', 67 apiKey: 'effective-key', 68 chatEndpointMode: 'responses', 69 } as never) 70 vi.mocked(getAppSettings).mockReturnValue({ 71 thinkingLevel: 'off', 72 reasoningLevel: 'off', 73 } as never) 74 vi.mocked(getSessionThinkingLevelOverride).mockReturnValue('medium') 75 vi.mocked(getSessionReasoningLevelOverride).mockReturnValue('stream') 76 }) 77 78 it('falls back to model help when model set is empty', async () => { 79 const result = await runModelSetSlashCommand({ 80 sessionId: 'session-1', 81 model: ' ', 82 attachmentNote: '\n\nNote: attachments ignored.', 83 }) 84 85 expect(result.text).toContain('Model command help:') 86 expect(result.text).toContain('/model chat: gpt-5-mini') 87 expect(result.text).toContain('Note: attachments ignored.') 88 }) 89 90 it('sets a model override for model set command', async () => { 91 const result = await runModelSetSlashCommand({ 92 sessionId: 'session-1', 93 model: 'gpt-5-pro', 94 attachmentNote: '', 95 }) 96 97 expect(setSessionChatModelOverride).toHaveBeenCalledWith( 98 'session-1', 99 'gpt-5-pro', 100 ) 101 expect(pinSessionProviderToCurrentDefault).toHaveBeenCalledWith('session-1') 102 expect(result.text).toContain("Set this session's chat model to `gpt-5-pro`.") 103 expect(result.provider).toBe('local-command') 104 }) 105 106 it('updates settings defaults and renders post-update summary', async () => { 107 vi.mocked(updateSettings).mockResolvedValue({ 108 defaults: { 109 chatModel: 'gpt-5-mini', 110 providerModelDefaults: { 111 openai: 'gpt-5-mini', 112 router: null, 113 custom: null, 114 }, 115 thinkingLevel: 'medium', 116 reasoningLevel: 'on', 117 providerPreset: 'openai', 118 providerBaseUrl: 'https://global.example.com', 119 providerEndpointMode: 'responses', 120 }, 121 session: { 122 activeChatModel: 'gpt-5-mini', 123 activeThinkingLevel: 'medium', 124 activeReasoningLevel: 'on', 
125 providerSourceLabel: 'global defaults', 126 }, 127 } as never) 128 129 const result = await runSettingsDefaultsUpdateSlashCommand({ 130 sessionId: 'session-1', 131 patch: { chatModel: 'gpt-5-mini' }, 132 scopeLabel: 'default model → gpt-5-mini', 133 attachmentNote: '', 134 }) 135 136 expect(updateSettings).toHaveBeenCalledWith({ 137 sessionId: 'session-1', 138 defaults: { 139 chatModel: 'gpt-5-mini', 140 thinkingLevel: undefined, 141 reasoningLevel: undefined, 142 providerPreset: undefined, 143 openAiBaseUrl: undefined, 144 openAiApiKey: undefined, 145 routerBaseUrl: undefined, 146 routerApiKey: undefined, 147 providerBaseUrl: undefined, 148 providerEndpointMode: undefined, 149 providerApiKey: undefined, 150 }, 151 }) 152 expect(result.text).toContain('Updated global defaults: default model → gpt-5-mini') 153 expect(result.text).toContain('- model fallback: gpt-5-mini') 154 expect(result.text).toContain('- provider source: global defaults') 155 }) 156 157 it('sets think/reasoning overrides and reports active values', () => { 158 const thinkResult = runThinkSetSlashCommand({ 159 sessionId: 'session-1', 160 thinkingLevel: 'medium', 161 attachmentNote: '', 162 }) 163 const reasoningResult = runReasoningSetSlashCommand({ 164 sessionId: 'session-1', 165 reasoningLevel: 'stream', 166 attachmentNote: '', 167 }) 168 169 expect(setSessionThinkingLevelOverride).toHaveBeenCalledWith( 170 'session-1', 171 'medium', 172 ) 173 expect(setSessionReasoningLevelOverride).toHaveBeenCalledWith( 174 'session-1', 175 'stream', 176 ) 177 expect(thinkResult.text).toContain('Set session thinking level to `medium`.') 178 expect(thinkResult.text).toContain('Active thinking level is now: medium') 179 expect(reasoningResult.text).toContain( 180 'Set session reasoning visibility to `stream`.', 181 ) 182 expect(reasoningResult.text).toContain( 183 'Active reasoning visibility is now: stream', 184 ) 185 }) 186 })