llm-provider-supplement.test.js
/**
 * LLM Provider Supplement Tests
 *
 * Covers additional untested paths in src/utils/llm-provider.js:
 * - getProvider() / getProviderDisplayName() for the OpenRouter provider
 * - callLLM with OpenRouter: haiku model passthrough, partial or missing
 *   usage object, missing message in choices
 * - Error propagation and logging for OpenRouter (API errors vs. network
 *   errors, jsonMode and hasVision reporting)
 * - Headers merging and request timeout
 * - json_mode toggling
 *
 * NOTE: The existing llm-provider-mocked.test.js is comprehensive. This file
 * adds supplemental tests focusing on any remaining gaps. The Anthropic
 * supplement tests were removed when the source went OpenRouter-only; they
 * are preserved in the block comment below for reference.
 */

import { describe, test, mock, beforeEach } from 'node:test';
import assert from 'node:assert/strict';

// ── Set up mocks BEFORE any import ─────────────────────────────────────────

const mockAnthropicCreate = mock.fn();
let capturedConstructorArgs = null;

class MockAnthropicSDK {
  constructor(opts) {
    capturedConstructorArgs = opts;
    this.messages = { create: mockAnthropicCreate };
  }
}

mock.module('@anthropic-ai/sdk', {
  defaultExport: MockAnthropicSDK,
});

const mockPost = mock.fn();
mock.module('axios', {
  defaultExport: { post: mockPost },
});

mock.module('dotenv', {
  defaultExport: { config: mock.fn() },
  namedExports: { config: mock.fn() },
});
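// mock.module() only intercepts modules that have not been loaded yet, which
// is why these registrations run before the provider is pulled in via the
// dynamic import() below. Module mocking in node:test is experimental and,
// depending on the Node version, may need the --experimental-test-module-mocks
// flag.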

// ── Load OpenRouter provider (source is OpenRouter-only now) ────────────────

process.env.OPENROUTER_API_KEY = 'supp-openrouter-key';
delete process.env.ANTHROPIC_API_KEY;

const {
  callLLM: orCallLLM,
  getProvider: orGetProvider,
  getProviderDisplayName: orDisplayName,
} = await import('../../src/utils/llm-provider.js?v=supp1');
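// The ?v=supp1 query string gives the specifier a unique URL, so the ESM
// loader evaluates a fresh copy of llm-provider.js rather than reusing an
// instance cached by another test file. The provider evidently reads its API
// key at module load time, hence the env vars are set immediately before this
// import and cleaned up immediately after.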

// ── Restore env ─────────────────────────────────────────────────────────────

delete process.env.OPENROUTER_API_KEY;

// NOTE: Anthropic supplement tests removed — source is now OpenRouter-only.

/* --- Anthropic tests removed (OpenRouter-only refactor) ---

test('getProviderDisplayName returns "Anthropic (Claude)"', () => {
  assert.equal(anthropicDisplayName(), 'Anthropic (Claude)');
});

test('Anthropic SDK constructed with correct API key', () => {
  assert.equal(capturedConstructorArgs?.apiKey, 'supp-anthropic-key');
});

test('callLLM strips system message from messages array', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'response' }],
    usage: { input_tokens: 20, output_tokens: 10 },
  }));

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [
      { role: 'system', content: 'Be concise.' },
      { role: 'user', content: 'Hello' },
      { role: 'assistant', content: 'Hi there!' },
    ],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  // System message extracted
  assert.equal(params.system, 'Be concise.');
  // Remaining messages should not include system
  assert.equal(params.messages.length, 2);
  assert.ok(params.messages.every(m => m.role !== 'system'));
});

test('callLLM joins multiple text blocks with newline', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [
      { type: 'text', text: 'First block.' },
      { type: 'text', text: 'Second block.' },
      { type: 'text', text: 'Third block.' },
    ],
    usage: { input_tokens: 30, output_tokens: 20 },
  }));

  const result = await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [{ role: 'user', content: 'multi' }],
  });

  assert.equal(result.content, 'First block.\nSecond block.\nThird block.');
});

test('callLLM handles assistant role messages in multi-turn conversation', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'answer' }],
    usage: { input_tokens: 50, output_tokens: 5 },
  }));

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-haiku',
    messages: [
      { role: 'user', content: 'Question 1' },
      { role: 'assistant', content: 'Answer 1' },
      { role: 'user', content: 'Question 2' },
    ],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  assert.equal(params.messages.length, 3);
  assert.equal(params.messages[1].role, 'assistant');
  // Verify model mapping for haiku
  assert.equal(params.model, 'claude-3-5-haiku-20241022');
});

test('callLLM with array content passes through non-image_url items as-is', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'ok' }],
    usage: { input_tokens: 10, output_tokens: 5 },
  }));

  const customItem = { type: 'document', content: 'some doc' };

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [
      {
        role: 'user',
        content: [{ type: 'text', text: 'hello' }, customItem],
      },
    ],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  assert.deepEqual(params.messages[0].content[1], customItem);
});

test('callLLM maps anthropic/claude-3.5-sonnet to correct versioned model', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'ok' }],
    usage: { input_tokens: 10, output_tokens: 5 },
  }));

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [{ role: 'user', content: 'test' }],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  assert.equal(params.model, 'claude-3-5-sonnet-20241022');
});

test('callLLM uses default model for unknown model names', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'ok' }],
    usage: { input_tokens: 10, output_tokens: 5 },
  }));

  await anthropicCallLLM({
    model: 'gpt-4-turbo',
    messages: [{ role: 'user', content: 'test' }],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  assert.equal(params.model, 'claude-3-5-sonnet-20241022');
});

test('callLLM correctly reports promptTokens and completionTokens', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'result' }],
    usage: { input_tokens: 150, output_tokens: 75 },
  }));

  const result = await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [{ role: 'user', content: 'count tokens' }],
  });

  assert.equal(result.usage.promptTokens, 150);
  assert.equal(result.usage.completionTokens, 75);
});

test('callLLM rejects when Anthropic SDK throws rate limit error', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => {
    throw new Error('Rate limit exceeded');
  });

  await assert.rejects(
    () =>
      anthropicCallLLM({
        model: 'anthropic/claude-3.5-sonnet',
        messages: [{ role: 'user', content: 'test' }],
      }),
    { message: 'Rate limit exceeded' }
  );
});

test('callLLM rejects with "No content" when only non-text blocks returned', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'tool_result', content: 'search result' }],
    usage: { input_tokens: 10, output_tokens: 0 },
  }));

  await assert.rejects(
    () =>
      anthropicCallLLM({
        model: 'anthropic/claude-3.5-sonnet',
        messages: [{ role: 'user', content: 'test' }],
      }),
    { message: 'No content in Anthropic API response' }
  );
});

test('callLLM with json_mode does not add response_format (Anthropic handles via prompts)', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: '{"result": true}' }],
    usage: { input_tokens: 10, output_tokens: 5 },
  }));

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [{ role: 'user', content: 'Return JSON' }],
    json_mode: true,
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  assert.ok(!('response_format' in params), 'Anthropic should not have response_format param');
});

test('callLLM handles image/gif media type in image_url', async () => {
  mockAnthropicCreate.mock.mockImplementation(async () => ({
    content: [{ type: 'text', text: 'ok' }],
    usage: { input_tokens: 10, output_tokens: 5 },
  }));

  await anthropicCallLLM({
    model: 'anthropic/claude-3.5-sonnet',
    messages: [
      {
        role: 'user',
        content: [
          {
            type: 'image_url',
            image_url: { url: 'data:image/gif;base64,R0lGOD==' },
          },
        ],
      },
    ],
  });

  const params = mockAnthropicCreate.mock.calls[0].arguments[0];
  const imgItem = params.messages[0].content[0];
  assert.equal(imgItem.source.media_type, 'image/gif');
});

--- end removed Anthropic tests */
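// The Anthropic test doubles registered at the top of the file
// (MockAnthropicSDK, mockAnthropicCreate, capturedConstructorArgs) are not
// exercised by any live test; they are presumably kept so @anthropic-ai/sdk
// stays stubbed and the commented tests above can be revived without new
// scaffolding.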

// ═══════════════════════════════════════════════════════════════════════════
// OPENROUTER SUPPLEMENT TESTS
// ═══════════════════════════════════════════════════════════════════════════

describe('LLM Provider Supplement - OpenRouter provider', () => {
  beforeEach(() => {
    mockPost.mock.resetCalls();
  });

  test('getProvider returns "openrouter"', () => {
    assert.equal(orGetProvider(), 'openrouter');
  });

  test('getProviderDisplayName returns "OpenRouter"', () => {
    assert.equal(orDisplayName(), 'OpenRouter');
  });

  test('callLLM sends correct model name in request body', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'ok' } }],
        usage: { prompt_tokens: 5, completion_tokens: 3 },
      },
    }));

    await orCallLLM({
      model: 'anthropic/claude-3.5-haiku',
      messages: [{ role: 'user', content: 'test' }],
    });

    const [, body] = mockPost.mock.calls[0].arguments;
    // OpenRouter passes the model name as-is (no mapping)
    assert.equal(body.model, 'anthropic/claude-3.5-haiku');
  });

  test('callLLM handles partial usage (only prompt_tokens present)', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'partial usage' } }],
        usage: { prompt_tokens: 50 }, // no completion_tokens
      },
    }));

    const result = await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
    });

    assert.equal(result.usage.promptTokens, 50);
    assert.equal(result.usage.completionTokens, 0);
  });

  test('callLLM handles missing usage object entirely', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'no usage' } }],
        // no usage field
      },
    }));

    const result = await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
    });

    assert.deepEqual(result.usage, { promptTokens: 0, completionTokens: 0 });
  });

  test('callLLM throws when choices[0].message is undefined', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{}], // no message property
        usage: { prompt_tokens: 10, completion_tokens: 0 },
      },
    }));

    await assert.rejects(
      () =>
        orCallLLM({
          model: 'openai/gpt-4o-mini',
          messages: [{ role: 'user', content: 'test' }],
        }),
      { message: 'No content in OpenRouter API response' }
    );
  });

  test('callLLM does not add response_format when json_mode is false', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'ok' } }],
        usage: { prompt_tokens: 5, completion_tokens: 3 },
      },
    }));

    await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
      json_mode: false,
    });

    const [, body] = mockPost.mock.calls[0].arguments;
    assert.equal(body.response_format, undefined);
  });

  test('callLLM adds json_object response_format when json_mode is true', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: '{}' } }],
        usage: { prompt_tokens: 5, completion_tokens: 3 },
      },
    }));

    await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
      json_mode: true,
    });

    const [, body] = mockPost.mock.calls[0].arguments;
    assert.deepEqual(body.response_format, { type: 'json_object' });
  });

  test('callLLM uses 120000ms timeout', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'ok' } }],
        usage: { prompt_tokens: 5, completion_tokens: 3 },
      },
    }));

    await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
    });

    const [, , config] = mockPost.mock.calls[0].arguments;
    assert.equal(config.timeout, 120000);
  });

  test('callLLM merges custom headers over defaults', async () => {
    mockPost.mock.mockImplementation(async () => ({
      data: {
        choices: [{ message: { content: 'ok' } }],
        usage: { prompt_tokens: 5, completion_tokens: 3 },
      },
    }));

    await orCallLLM({
      model: 'openai/gpt-4o-mini',
      messages: [{ role: 'user', content: 'test' }],
      headers: { 'X-Trace-ID': 'abc-123' },
    });

    const [, , config] = mockPost.mock.calls[0].arguments;
    assert.equal(config.headers['X-Trace-ID'], 'abc-123');
    // Defaults should still be present
    assert.ok(config.headers.Authorization.startsWith('Bearer '));
    assert.equal(config.headers['Content-Type'], 'application/json');
  });
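  // The three tests below monkey-patch console.warn to capture the provider's
  // error logging; the patch is restored in a finally block so a failing
  // assertion cannot leak the override into other tests.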
  test('callLLM logs error details when response has status code', async () => {
    const apiErr = new Error('Forbidden');
    apiErr.response = { status: 403, data: { error: 'Forbidden' } };
    mockPost.mock.mockImplementation(async () => {
      throw apiErr;
    });

    let capturedOutput = null;
    const origWarn = console.warn;
    console.warn = (...args) => {
      capturedOutput = args.join(' ');
    };

    try {
      await assert.rejects(
        () =>
          orCallLLM({
            model: 'openai/gpt-4o-mini',
            messages: [{ role: 'user', content: 'test' }],
          }),
        { message: 'Forbidden' }
      );
      assert.ok(capturedOutput !== null, 'console.warn should have been called');
      assert.ok(capturedOutput.includes('API Error details'), 'Should mention API Error details');
      assert.ok(capturedOutput.includes('"status": 403'), 'Should include status 403');
      assert.ok(capturedOutput.includes('"jsonMode": false'), 'Should report jsonMode false');
      assert.ok(capturedOutput.includes('"hasVision": false'), 'Should report hasVision false');
    } finally {
      console.warn = origWarn;
    }
  });

  test('callLLM does not log API error details for network errors (no err.response)', async () => {
    const netErr = new Error('Network timeout');
    // No .response property
    mockPost.mock.mockImplementation(async () => {
      throw netErr;
    });

    let capturedOutput = '';
    const origWarn = console.warn;
    console.warn = (...args) => {
      capturedOutput += args.join(' ');
    };

    try {
      await assert.rejects(
        () =>
          orCallLLM({
            model: 'openai/gpt-4o-mini',
            messages: [{ role: 'user', content: 'test' }],
          }),
        { message: 'Network timeout' }
      );
      // API Error details are only logged when err.response exists
      assert.ok(
        !capturedOutput.includes('API Error details'),
        'Should not log API Error details for network-level errors'
      );
    } finally {
      console.warn = origWarn;
    }
  });

  test('callLLM reports hasVision=true when messages contain image_url items', async () => {
    const apiErr = new Error('BadRequest');
    apiErr.response = { status: 400, data: {} };
    mockPost.mock.mockImplementation(async () => {
      throw apiErr;
    });

    let capturedOutput = null;
    const origWarn = console.warn;
    console.warn = (...args) => {
      capturedOutput = args.join(' ');
    };

    try {
      await assert.rejects(
        () =>
          orCallLLM({
            model: 'openai/gpt-4o-mini',
            messages: [
              {
                role: 'user',
                content: [
                  { type: 'text', text: 'look' },
                  { type: 'image_url', image_url: { url: 'data:image/png;base64,abc' } },
                ],
              },
            ],
            json_mode: true,
          }),
        { message: 'BadRequest' }
      );
      assert.ok(capturedOutput !== null, 'console.warn should have been called');
      assert.ok(capturedOutput.includes('"hasVision": true'), 'Should report hasVision true');
      assert.ok(capturedOutput.includes('"jsonMode": true'), 'Should report jsonMode true');
    } finally {
      console.warn = origWarn;
    }
  });
});
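
// Run with the node:test runner; exact path and flags depend on your setup,
// e.g.: node --test --experimental-test-module-mocks llm-provider-supplement.test.js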