// tests/utils/llm-provider-mocked.test.js
  1  /**
  2   * Comprehensive Unit Tests for LLM Provider Module
  3   * Uses Node.js 22+ mock.module() to mock Anthropic SDK, axios, and dotenv
  4   * before importing the module under test.
  5   *
  6   * Tests both Anthropic and OpenRouter code paths with full mocking.
  7   * Each provider path gets a separate module import with cache-busting
  8   * to ensure independent initialization.
  9   */
 10  
 11  import { describe, test, mock, beforeEach } from 'node:test';
 12  import assert from 'node:assert/strict';
 13  
 14  // Save original env vars
 15  const origAnthropicKey = process.env.ANTHROPIC_API_KEY;
 16  const origOpenRouterKey = process.env.OPENROUTER_API_KEY;
 17  
 18  // ---------------------------------------------------------------------------
 19  // Set up mocks BEFORE any imports of the module under test
 20  // ---------------------------------------------------------------------------
 21  
 22  // Mock @anthropic-ai/sdk
 23  const mockCreate = mock.fn();
 24  let anthropicConstructorArgs = null;
 25  
 26  class MockAnthropic {
 27    constructor(opts) {
 28      anthropicConstructorArgs = opts;
 29      this.messages = { create: mockCreate };
 30    }
 31  }
 32  
 33  mock.module('@anthropic-ai/sdk', {
 34    defaultExport: MockAnthropic,
 35  });
 36  
 37  // Mock axios
 38  const mockAxiosPost = mock.fn();
 39  mock.module('axios', {
 40    defaultExport: { post: mockAxiosPost },
 41  });
 42  
 43  // Mock dotenv to prevent loading real .env which could override test env vars
 44  mock.module('dotenv', {
 45    defaultExport: { config: mock.fn() },
 46    namedExports: { config: mock.fn() },
 47  });
 48  
 49  // ---------------------------------------------------------------------------
 50  // Import OpenRouter-configured module (source is OpenRouter-only now)
 51  // ---------------------------------------------------------------------------
 52  process.env.OPENROUTER_API_KEY = 'test-openrouter-key';
 53  delete process.env.ANTHROPIC_API_KEY;
 54  
 55  const {
 56    callLLM: orCallLLM,
 57    getProvider: orGetProvider,
 58    getProviderDisplayName: orGetProviderDisplayName,
 59  } = await import('../../src/utils/llm-provider.js');
 60  
 61  // Restore env vars after imports
 62  if (origOpenRouterKey) {
 63    process.env.OPENROUTER_API_KEY = origOpenRouterKey;
 64  } else {
 65    delete process.env.OPENROUTER_API_KEY;
 66  }
 67  
 68  // NOTE: Anthropic provider tests removed — source is now OpenRouter-only.
 69  // See llm-provider.js for the refactored API.
 70  
 71  // REMOVED: ~430 lines of Anthropic-specific tests (model mapping, system message
 72  // extraction, image_url conversion, multi-block response handling, etc.)
 73  // These code paths no longer exist in the OpenRouter-only source.
 74  
 75  /* --- Anthropic tests removed (OpenRouter-only refactor) ---
 76      mockCreate.mock.mockImplementation(async () => ({
 77        content: [{ type: 'text', text: 'Hello world' }],
 78        usage: { input_tokens: 100, output_tokens: 50 },
 79      }));
 80  
 81      const result = await anthropicCallLLM({
 82        model: 'anthropic/claude-3.5-sonnet',
 83        messages: [{ role: 'user', content: 'Say hello' }],
 84      });
 85  
 86      assert.equal(result.content, 'Hello world');
 87      assert.deepEqual(result.usage, { promptTokens: 100, completionTokens: 50 });
 88    });
 89  
 90    // --- callLLM passes correct default parameters ---
 91  
 92    test('callLLM passes default temperature and max_tokens', async () => {
 93      mockCreate.mock.mockImplementation(async () => ({
 94        content: [{ type: 'text', text: 'ok' }],
 95        usage: { input_tokens: 10, output_tokens: 5 },
 96      }));
 97  
 98      await anthropicCallLLM({
 99        model: 'anthropic/claude-3.5-sonnet',
100        messages: [{ role: 'user', content: 'test' }],
101      });
102  
103      assert.equal(mockCreate.mock.callCount(), 1);
104      const params = mockCreate.mock.calls[0].arguments[0];
105      assert.equal(params.temperature, 0.7, 'default temperature should be 0.7');
106      assert.equal(params.max_tokens, 2000, 'default max_tokens should be 2000');
107    });
108  
109    // --- callLLM with custom temperature and max_tokens ---
110  
111    test('callLLM passes custom temperature and max_tokens', async () => {
112      mockCreate.mock.mockImplementation(async () => ({
113        content: [{ type: 'text', text: 'ok' }],
114        usage: { input_tokens: 10, output_tokens: 5 },
115      }));
116  
117      await anthropicCallLLM({
118        model: 'anthropic/claude-3.5-sonnet',
119        messages: [{ role: 'user', content: 'test' }],
120        temperature: 0.2,
121        max_tokens: 500,
122      });
123  
124      const params = mockCreate.mock.calls[0].arguments[0];
125      assert.equal(params.temperature, 0.2);
126      assert.equal(params.max_tokens, 500);
127    });
128  
129    // --- callLLM with system message ---
130  
131    test('callLLM extracts system message and passes as requestParams.system', async () => {
132      mockCreate.mock.mockImplementation(async () => ({
133        content: [{ type: 'text', text: 'response' }],
134        usage: { input_tokens: 50, output_tokens: 20 },
135      }));
136  
137      await anthropicCallLLM({
138        model: 'anthropic/claude-3.5-sonnet',
139        messages: [
140          { role: 'system', content: 'You are a helpful assistant.' },
141          { role: 'user', content: 'Hello' },
142        ],
143      });
144  
145      assert.equal(mockCreate.mock.callCount(), 1);
146      const params = mockCreate.mock.calls[0].arguments[0];
147      assert.equal(params.system, 'You are a helpful assistant.');
148      // System message should NOT be in the messages array
149      const roles = params.messages.map(m => m.role);
150      assert.ok(!roles.includes('system'), 'system role should be filtered from messages');
151      assert.equal(params.messages.length, 1);
152      assert.equal(params.messages[0].role, 'user');
153      assert.equal(params.messages[0].content, 'Hello');
154    });
155  
156    // --- callLLM without system message ---
157  
158    test('callLLM does not set system param when no system message present', async () => {
159      mockCreate.mock.mockImplementation(async () => ({
160        content: [{ type: 'text', text: 'response' }],
161        usage: { input_tokens: 10, output_tokens: 5 },
162      }));
163  
164      await anthropicCallLLM({
165        model: 'anthropic/claude-3.5-sonnet',
166        messages: [{ role: 'user', content: 'Hello' }],
167      });
168  
169      const params = mockCreate.mock.calls[0].arguments[0];
170      assert.ok(!('system' in params), 'system key should not exist when no system message');
171    });
172  
173    // --- callLLM with vision messages (image_url) ---
174  
175    test('callLLM converts image_url to Anthropic base64 format', async () => {
176      mockCreate.mock.mockImplementation(async () => ({
177        content: [{ type: 'text', text: 'I see an image' }],
178        usage: { input_tokens: 200, output_tokens: 30 },
179      }));
180  
181      const base64Payload = 'iVBORw0KGgoAAAANSUhEUg==';
182      await anthropicCallLLM({
183        model: 'openai/gpt-4o-mini',
184        messages: [
185          {
186            role: 'user',
187            content: [
188              { type: 'text', text: 'What is in this image?' },
189              {
190                type: 'image_url',
191                image_url: { url: `data:image/png;base64,${base64Payload}` },
192              },
193            ],
194          },
195        ],
196      });
197  
198      const params = mockCreate.mock.calls[0].arguments[0];
199      const userMsg = params.messages[0];
200      assert.equal(userMsg.content.length, 2);
201  
202      // Text item
203      assert.equal(userMsg.content[0].type, 'text');
204      assert.equal(userMsg.content[0].text, 'What is in this image?');
205  
206      // Image item - converted to Anthropic format
207      const imageItem = userMsg.content[1];
208      assert.equal(imageItem.type, 'image');
209      assert.equal(imageItem.source.type, 'base64');
210      assert.equal(imageItem.source.media_type, 'image/png');
211      assert.equal(imageItem.source.data, base64Payload);
212    });
213  
214    // --- callLLM with JPEG image ---
215  
216    test('callLLM handles JPEG image_url with correct media type', async () => {
217      mockCreate.mock.mockImplementation(async () => ({
218        content: [{ type: 'text', text: 'ok' }],
219        usage: { input_tokens: 10, output_tokens: 5 },
220      }));
221  
222      await anthropicCallLLM({
223        model: 'anthropic/claude-3.5-sonnet',
224        messages: [
225          {
226            role: 'user',
227            content: [
228              {
229                type: 'image_url',
230                image_url: { url: 'data:image/jpeg;base64,/9j/4AAQ==' },
231              },
232            ],
233          },
234        ],
235      });
236  
237      const params = mockCreate.mock.calls[0].arguments[0];
238      const imageItem = params.messages[0].content[0];
239      assert.equal(imageItem.source.media_type, 'image/jpeg');
240      assert.equal(imageItem.source.data, '/9j/4AAQ==');
241    });
242  
243    // --- callLLM image fallback media type ---
244  
245    test('callLLM falls back to image/jpeg for unrecognized image format', async () => {
246      mockCreate.mock.mockImplementation(async () => ({
247        content: [{ type: 'text', text: 'ok' }],
248        usage: { input_tokens: 10, output_tokens: 5 },
249      }));
250  
251      await anthropicCallLLM({
252        model: 'anthropic/claude-3.5-sonnet',
253        messages: [
254          {
255            role: 'user',
256            content: [
257              {
258                type: 'image_url',
259                image_url: { url: 'not-a-data-url' },
260              },
261            ],
262          },
263        ],
264      });
265  
266      const params = mockCreate.mock.calls[0].arguments[0];
267      const imageItem = params.messages[0].content[0];
268      assert.equal(imageItem.source.media_type, 'image/jpeg');
269    });
270  
271    // --- Model mapping tests ---
272  
273    test('callLLM maps openai/gpt-4o-mini to claude-3-5-sonnet-20241022', async () => {
274      mockCreate.mock.mockImplementation(async () => ({
275        content: [{ type: 'text', text: 'ok' }],
276        usage: { input_tokens: 10, output_tokens: 5 },
277      }));
278  
279      await anthropicCallLLM({
280        model: 'openai/gpt-4o-mini',
281        messages: [{ role: 'user', content: 'test' }],
282      });
283  
284      assert.equal(mockCreate.mock.calls[0].arguments[0].model, 'claude-3-5-sonnet-20241022');
285    });
286  
287    test('callLLM maps anthropic/claude-3.5-haiku to claude-3-5-haiku-20241022', async () => {
288      mockCreate.mock.mockImplementation(async () => ({
289        content: [{ type: 'text', text: 'ok' }],
290        usage: { input_tokens: 10, output_tokens: 5 },
291      }));
292  
293      await anthropicCallLLM({
294        model: 'anthropic/claude-3.5-haiku',
295        messages: [{ role: 'user', content: 'test' }],
296      });
297  
298      assert.equal(mockCreate.mock.calls[0].arguments[0].model, 'claude-3-5-haiku-20241022');
299    });
300  
301    test('callLLM maps anthropic/claude-3.5-sonnet-20241022 correctly', async () => {
302      mockCreate.mock.mockImplementation(async () => ({
303        content: [{ type: 'text', text: 'ok' }],
304        usage: { input_tokens: 10, output_tokens: 5 },
305      }));
306  
307      await anthropicCallLLM({
308        model: 'anthropic/claude-3.5-sonnet-20241022',
309        messages: [{ role: 'user', content: 'test' }],
310      });
311  
312      assert.equal(mockCreate.mock.calls[0].arguments[0].model, 'claude-3-5-sonnet-20241022');
313    });
314  
315    test('callLLM maps anthropic/claude-3.5-haiku-20241022 correctly', async () => {
316      mockCreate.mock.mockImplementation(async () => ({
317        content: [{ type: 'text', text: 'ok' }],
318        usage: { input_tokens: 10, output_tokens: 5 },
319      }));
320  
321      await anthropicCallLLM({
322        model: 'anthropic/claude-3.5-haiku-20241022',
323        messages: [{ role: 'user', content: 'test' }],
324      });
325  
326      assert.equal(mockCreate.mock.calls[0].arguments[0].model, 'claude-3-5-haiku-20241022');
327    });
328  
329    test('callLLM defaults unknown model to claude-3-5-sonnet-20241022', async () => {
330      mockCreate.mock.mockImplementation(async () => ({
331        content: [{ type: 'text', text: 'ok' }],
332        usage: { input_tokens: 10, output_tokens: 5 },
333      }));
334  
335      await anthropicCallLLM({
336        model: 'some-random/unknown-model',
337        messages: [{ role: 'user', content: 'test' }],
338      });
339  
340      assert.equal(mockCreate.mock.calls[0].arguments[0].model, 'claude-3-5-sonnet-20241022');
341    });
342  
343    // --- Error handling ---
344  
345    test('callLLM throws when Anthropic response has no text content', async () => {
346      mockCreate.mock.mockImplementation(async () => ({
347        content: [],
348        usage: { input_tokens: 10, output_tokens: 0 },
349      }));
350  
351      await assert.rejects(
352        () =>
353          anthropicCallLLM({
354            model: 'anthropic/claude-3.5-sonnet',
355            messages: [{ role: 'user', content: 'test' }],
356          }),
357        { message: 'No content in Anthropic API response' }
358      );
359    });
360  
361    test('callLLM throws when response has only non-text content blocks', async () => {
362      mockCreate.mock.mockImplementation(async () => ({
363        content: [{ type: 'tool_use', id: 'tool_1', name: 'search', input: {} }],
364        usage: { input_tokens: 10, output_tokens: 5 },
365      }));
366  
367      await assert.rejects(
368        () =>
369          anthropicCallLLM({
370            model: 'anthropic/claude-3.5-sonnet',
371            messages: [{ role: 'user', content: 'test' }],
372          }),
373        { message: 'No content in Anthropic API response' }
374      );
375    });
376  
377    // --- Multi-block response handling ---
378  
379    test('callLLM joins multiple text content blocks with newline', async () => {
380      mockCreate.mock.mockImplementation(async () => ({
381        content: [
382          { type: 'text', text: 'Part one' },
383          { type: 'text', text: 'Part two' },
384        ],
385        usage: { input_tokens: 20, output_tokens: 10 },
386      }));
387  
388      const result = await anthropicCallLLM({
389        model: 'anthropic/claude-3.5-sonnet',
390        messages: [{ role: 'user', content: 'test' }],
391      });
392  
393      assert.equal(result.content, 'Part one\nPart two');
394    });
395  
396    test('callLLM filters non-text blocks and extracts only text', async () => {
397      mockCreate.mock.mockImplementation(async () => ({
398        content: [
399          { type: 'tool_use', id: 'tool_1', name: 'search', input: {} },
400          { type: 'text', text: 'Here is the answer' },
401        ],
402        usage: { input_tokens: 30, output_tokens: 15 },
403      }));
404  
405      const result = await anthropicCallLLM({
406        model: 'anthropic/claude-3.5-sonnet',
407        messages: [{ role: 'user', content: 'test' }],
408      });
409  
410      assert.equal(result.content, 'Here is the answer');
411    });
412  
413    // --- SDK error propagation ---
414  
415    test('callLLM propagates Anthropic SDK errors', async () => {
416      mockCreate.mock.mockImplementation(async () => {
417        throw new Error('API rate limit exceeded');
418      });
419  
420      await assert.rejects(
421        () =>
422          anthropicCallLLM({
423            model: 'anthropic/claude-3.5-sonnet',
424            messages: [{ role: 'user', content: 'test' }],
425          }),
426        { message: 'API rate limit exceeded' }
427      );
428    });
429  
430    // --- Multi-turn conversation ---
431  
432    test('callLLM handles multi-turn conversation messages', async () => {
433      mockCreate.mock.mockImplementation(async () => ({
434        content: [{ type: 'text', text: 'The capital is Paris.' }],
435        usage: { input_tokens: 80, output_tokens: 10 },
436      }));
437  
438      await anthropicCallLLM({
439        model: 'anthropic/claude-3.5-sonnet',
440        messages: [
441          { role: 'system', content: 'You are a geography expert.' },
442          { role: 'user', content: 'What is the capital of France?' },
443          { role: 'assistant', content: 'Let me think about that.' },
444          { role: 'user', content: 'Please answer directly.' },
445        ],
446      });
447  
448      const params = mockCreate.mock.calls[0].arguments[0];
449      assert.equal(params.system, 'You are a geography expert.');
450      assert.equal(params.messages.length, 3);
451      assert.equal(params.messages[0].role, 'user');
452      assert.equal(params.messages[0].content, 'What is the capital of France?');
453      assert.equal(params.messages[1].role, 'assistant');
454      assert.equal(params.messages[1].content, 'Let me think about that.');
455      assert.equal(params.messages[2].role, 'user');
456      assert.equal(params.messages[2].content, 'Please answer directly.');
457    });
458  
459    // --- Passthrough content items ---
460  
461    test('callLLM passes through unrecognized content item types as-is', async () => {
462      mockCreate.mock.mockImplementation(async () => ({
463        content: [{ type: 'text', text: 'ok' }],
464        usage: { input_tokens: 10, output_tokens: 5 },
465      }));
466  
467      const customItem = { type: 'custom', data: 'something' };
468      await anthropicCallLLM({
469        model: 'anthropic/claude-3.5-sonnet',
470        messages: [
471          {
472            role: 'user',
473            content: [{ type: 'text', text: 'test' }, customItem],
474          },
475        ],
476      });
477  
478      const params = mockCreate.mock.calls[0].arguments[0];
479      const contentItems = params.messages[0].content;
480      assert.equal(contentItems.length, 2);
481      assert.deepEqual(contentItems[1], customItem);
482    });
483  
484    // --- Empty system message ---
485  
486    test('callLLM does not set system param for empty string system message', async () => {
487      mockCreate.mock.mockImplementation(async () => ({
488        content: [{ type: 'text', text: 'ok' }],
489        usage: { input_tokens: 10, output_tokens: 5 },
490      }));
491  
492      await anthropicCallLLM({
493        model: 'anthropic/claude-3.5-sonnet',
494        messages: [
495          { role: 'system', content: '' },
496          { role: 'user', content: 'test' },
497        ],
498      });
499  
500      const params = mockCreate.mock.calls[0].arguments[0];
501  --- end removed Anthropic tests */
502  
503  // ---------------------------------------------------------------------------
504  // OPENROUTER PROVIDER TESTS
505  // ---------------------------------------------------------------------------
506  describe('LLM Provider - OpenRouter path', () => {
507    beforeEach(() => {
508      mockAxiosPost.mock.resetCalls();
509    });
510  
511    // --- getProvider ---
512  
513    test('getProvider returns "openrouter"', () => {
514      assert.equal(orGetProvider(), 'openrouter');
515    });
516  
517    // --- getProviderDisplayName ---
518  
519    test('getProviderDisplayName returns "OpenRouter"', () => {
520      assert.equal(orGetProviderDisplayName(), 'OpenRouter');
521    });
522  
523    // --- callLLM returns content and usage ---
524  
525    test('callLLM returns content and usage from OpenRouter response', async () => {
526      mockAxiosPost.mock.mockImplementation(async () => ({
527        data: {
528          choices: [{ message: { content: 'Hello from OpenRouter' } }],
529          usage: { prompt_tokens: 100, completion_tokens: 50 },
530        },
531      }));
532  
533      const result = await orCallLLM({
534        model: 'openai/gpt-4o-mini',
535        messages: [{ role: 'user', content: 'Say hello' }],
536      });
537  
538      assert.equal(result.content, 'Hello from OpenRouter');
539      assert.deepEqual(result.usage, { promptTokens: 100, completionTokens: 50 });
540    });
541  
542    // --- callLLM sends correct request ---
543  
544    test('callLLM sends request to correct endpoint with auth headers', async () => {
545      mockAxiosPost.mock.mockImplementation(async () => ({
546        data: {
547          choices: [{ message: { content: 'ok' } }],
548          usage: { prompt_tokens: 10, completion_tokens: 5 },
549        },
550      }));
551  
552      await orCallLLM({
553        model: 'openai/gpt-4o-mini',
554        messages: [{ role: 'user', content: 'test' }],
555      });
556  
557      assert.equal(mockAxiosPost.mock.callCount(), 1);
558      const [url, body, config] = mockAxiosPost.mock.calls[0].arguments;
559  
560      assert.equal(url, 'https://openrouter.ai/api/v1/chat/completions');
561      assert.equal(body.model, 'openai/gpt-4o-mini');
562      assert.deepEqual(body.messages, [{ role: 'user', content: 'test' }]);
563      assert.equal(body.temperature, 0.7);
564      assert.equal(body.max_tokens, 2000);
565      assert.equal(config.headers.Authorization, 'Bearer test-openrouter-key');
566      assert.equal(config.headers['HTTP-Referer'], 'https://333method.local');
567      assert.equal(config.headers['X-Title'], '333 Method Automation');
568      assert.equal(config.headers['Content-Type'], 'application/json');
569      assert.equal(config.timeout, 120000);
570    });
571  
572    // --- Custom temperature and max_tokens ---
573  
574    test('callLLM passes custom temperature and max_tokens', async () => {
575      mockAxiosPost.mock.mockImplementation(async () => ({
576        data: {
577          choices: [{ message: { content: 'ok' } }],
578          usage: { prompt_tokens: 10, completion_tokens: 5 },
579        },
580      }));
581  
582      await orCallLLM({
583        model: 'openai/gpt-4o-mini',
584        messages: [{ role: 'user', content: 'test' }],
585        temperature: 0.1,
586        max_tokens: 4000,
587      });
588  
589      const [, body] = mockAxiosPost.mock.calls[0].arguments;
590      assert.equal(body.temperature, 0.1);
591      assert.equal(body.max_tokens, 4000);
592    });
593  
594    // --- json_mode ---
595  
596    test('callLLM adds response_format when json_mode is true', async () => {
597      mockAxiosPost.mock.mockImplementation(async () => ({
598        data: {
599          choices: [{ message: { content: '{"key": "value"}' } }],
600          usage: { prompt_tokens: 10, completion_tokens: 5 },
601        },
602      }));
603  
604      await orCallLLM({
605        model: 'openai/gpt-4o-mini',
606        messages: [{ role: 'user', content: 'Return JSON' }],
607        json_mode: true,
608      });
609  
610      const [, body] = mockAxiosPost.mock.calls[0].arguments;
611      assert.deepEqual(body.response_format, { type: 'json_object' });
612    });
613  
614    test('callLLM does not add response_format when json_mode is false', async () => {
615      mockAxiosPost.mock.mockImplementation(async () => ({
616        data: {
617          choices: [{ message: { content: 'ok' } }],
618          usage: { prompt_tokens: 10, completion_tokens: 5 },
619        },
620      }));
621  
622      await orCallLLM({
623        model: 'openai/gpt-4o-mini',
624        messages: [{ role: 'user', content: 'test' }],
625        json_mode: false,
626      });
627  
628      const [, body] = mockAxiosPost.mock.calls[0].arguments;
629      assert.equal(body.response_format, undefined);
630    });
631  
632    // --- Custom headers ---
633  
634    test('callLLM merges custom headers with defaults', async () => {
635      mockAxiosPost.mock.mockImplementation(async () => ({
636        data: {
637          choices: [{ message: { content: 'ok' } }],
638          usage: { prompt_tokens: 10, completion_tokens: 5 },
639        },
640      }));
641  
642      await orCallLLM({
643        model: 'openai/gpt-4o-mini',
644        messages: [{ role: 'user', content: 'test' }],
645        headers: { 'X-Custom-Header': 'custom-value' },
646      });
647  
648      const [, , config] = mockAxiosPost.mock.calls[0].arguments;
649      assert.equal(config.headers['X-Custom-Header'], 'custom-value');
650      assert.equal(config.headers.Authorization, 'Bearer test-openrouter-key');
651    });
652  
653    // --- Missing usage data ---
654  
655    test('callLLM defaults usage to zero when response omits usage', async () => {
656      mockAxiosPost.mock.mockImplementation(async () => ({
657        data: {
658          choices: [{ message: { content: 'ok' } }],
659        },
660      }));
661  
662      const result = await orCallLLM({
663        model: 'openai/gpt-4o-mini',
664        messages: [{ role: 'user', content: 'test' }],
665      });
666  
667      assert.deepEqual(result.usage, { promptTokens: 0, completionTokens: 0 });
668    });
669  
670    // --- No content errors ---
671  
672    test('callLLM throws when response has no content', async () => {
673      mockAxiosPost.mock.mockImplementation(async () => ({
674        data: {
675          choices: [{ message: {} }],
676          usage: { prompt_tokens: 10, completion_tokens: 0 },
677        },
678      }));
679  
680      await assert.rejects(
681        () =>
682          orCallLLM({
683            model: 'openai/gpt-4o-mini',
684            messages: [{ role: 'user', content: 'test' }],
685          }),
686        { message: 'No content in OpenRouter API response' }
687      );
688    });
689  
690    test('callLLM throws when response has empty choices array', async () => {
691      mockAxiosPost.mock.mockImplementation(async () => ({
692        data: {
693          choices: [],
694          usage: { prompt_tokens: 10, completion_tokens: 0 },
695        },
696      }));
697  
698      await assert.rejects(
699        () =>
700          orCallLLM({
701            model: 'openai/gpt-4o-mini',
702            messages: [{ role: 'user', content: 'test' }],
703          }),
704        { message: 'No content in OpenRouter API response' }
705      );
706    });
707  
708    // --- Enhanced error logging ---
709  
710    test('callLLM logs enhanced error details for API errors with response', async () => {
711      const apiError = new Error('Bad Request');
712      apiError.response = {
713        status: 400,
714        data: { error: { message: 'Invalid model' } },
715      };
716  
717      mockAxiosPost.mock.mockImplementation(async () => {
718        throw apiError;
719      });
720  
721      const origConsoleWarn = console.warn;
722      let loggedOutput = null;
723      console.warn = (...args) => {
724        loggedOutput = args.join(' ');
725      };
726  
727      try {
728        await assert.rejects(
729          () =>
730            orCallLLM({
731              model: 'openai/gpt-4o-mini',
732              messages: [{ role: 'user', content: 'test' }],
733            }),
734          { message: 'Bad Request' }
735        );
736  
737        assert.ok(loggedOutput, 'console.warn should have been called');
738        assert.ok(loggedOutput.includes('API Error details'), 'Should mention API Error details');
739        assert.ok(loggedOutput.includes('"status": 400'), 'Should include status 400');
740        assert.ok(loggedOutput.includes('"model"'), 'Should include model');
741        assert.ok(loggedOutput.includes('"hasVision": false'), 'Should report hasVision false');
742        assert.ok(loggedOutput.includes('"jsonMode": false'), 'Should report jsonMode false');
743      } finally {
744        console.warn = origConsoleWarn;
745      }
746    });
747  
748    test('callLLM error logging reports hasVision=true for image messages', async () => {
749      const apiError = new Error('Server Error');
750      apiError.response = {
751        status: 500,
752        data: { error: 'Internal' },
753      };
754  
755      mockAxiosPost.mock.mockImplementation(async () => {
756        throw apiError;
757      });
758  
759      const origConsoleWarn = console.warn;
760      let loggedOutput = null;
761      console.warn = (...args) => {
762        loggedOutput = args.join(' ');
763      };
764  
765      try {
766        await assert.rejects(
767          () =>
768            orCallLLM({
769              model: 'openai/gpt-4o-mini',
770              messages: [
771                {
772                  role: 'user',
773                  content: [
774                    { type: 'text', text: 'Describe' },
775                    { type: 'image_url', image_url: { url: 'data:image/png;base64,abc' } },
776                  ],
777                },
778              ],
779              json_mode: true,
780            }),
781          { message: 'Server Error' }
782        );
783  
784        assert.ok(loggedOutput, 'console.warn should have been called');
785        assert.ok(loggedOutput.includes('"hasVision": true'), 'Should report hasVision true');
786        assert.ok(loggedOutput.includes('"jsonMode": true'), 'Should report jsonMode true');
787      } finally {
788        console.warn = origConsoleWarn;
789      }
790    });
791  
792    test('callLLM rethrows network errors without enhanced logging', async () => {
793      const networkError = new Error('ECONNREFUSED');
794      // No .response property - network-level error
795  
796      mockAxiosPost.mock.mockImplementation(async () => {
797        throw networkError;
798      });
799  
800      const origConsoleError = console.error;
801      let errorLogged = false;
802      console.error = () => {
803        errorLogged = true;
804      };
805  
806      try {
807        await assert.rejects(
808          () =>
809            orCallLLM({
810              model: 'openai/gpt-4o-mini',
811              messages: [{ role: 'user', content: 'test' }],
812            }),
813          { message: 'ECONNREFUSED' }
814        );
815  
816        assert.equal(errorLogged, false, 'Should not log enhanced error for network errors');
817      } finally {
818        console.error = origConsoleError;
819      }
820    });
821  
822    // --- System messages pass through ---
823  
824    test('callLLM passes system messages to OpenRouter without conversion', async () => {
825      mockAxiosPost.mock.mockImplementation(async () => ({
826        data: {
827          choices: [{ message: { content: 'ok' } }],
828          usage: { prompt_tokens: 10, completion_tokens: 5 },
829        },
830      }));
831  
832      const messages = [
833        { role: 'system', content: 'You are helpful' },
834        { role: 'user', content: 'Hello' },
835      ];
836  
837      await orCallLLM({
838        model: 'openai/gpt-4o-mini',
839        messages,
840      });
841  
842      const [, body] = mockAxiosPost.mock.calls[0].arguments;
843      assert.deepEqual(body.messages, messages);
844    });
845  });