// app/src/services/llm.ts
import { invoke } from '@tauri-apps/api/core';
import { listen } from '@tauri-apps/api/event';
import type {
  Message,
  LLMResponse,
  StreamChunk,
  ProviderStatus,
  ModelInfo,
  Provider,
} from '../types/llm';
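
// Note (assumption about the backend commands): with Tauri's default command
// argument naming, camelCase keys passed to `invoke` (e.g. `maxTokens`) are
// matched to snake_case Rust parameters (e.g. `max_tokens`), unless a command
// overrides `rename_all`.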

export class LLMService {
  /**
   * List all available LLM providers
   */
  static async listProviders(): Promise<string[]> {
    return invoke<string[]>('list_providers');
  }

  /**
   * Get status of a specific provider
   */
  static async getProviderStatus(provider: Provider): Promise<ProviderStatus> {
    return invoke<ProviderStatus>('get_provider_status', { provider });
  }

  /**
   * List available models for a provider
   */
  static async listModels(provider: Provider): Promise<ModelInfo[]> {
    return invoke<ModelInfo[]>('list_models', { provider });
  }

  /**
   * Send a message and get the complete response
   */
  static async sendMessage(
    provider: Provider,
    messages: Message[],
    model?: string,
    temperature?: number,
    maxTokens?: number
  ): Promise<LLMResponse> {
    return invoke<LLMResponse>('send_message', {
      provider,
      model,
      messages,
      temperature,
      maxTokens,
    });
  }
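
  // Illustrative usage (a sketch, not part of this module): given a selected
  // provider and a previously built `messages: Message[]` array, request a
  // single, non-streaming completion.
  //
  //   const response = await LLMService.sendMessage(provider, messages);
  //   console.log(response);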

  /**
   * Set the current provider
   */
  static async setCurrentProvider(provider: Provider): Promise<void> {
    return invoke<void>('set_current_provider', { provider });
  }

  /**
   * Stream a message with callbacks for chunks
   */
  static async streamMessage(
    provider: Provider,
    messages: Message[],
    options: {
      model?: string;
      temperature?: number;
      maxTokens?: number;
      onChunk?: (chunk: StreamChunk) => void;
      onError?: (error: string) => void;
      onComplete?: () => void;
    } = {}
  ): Promise<() => void> {
    const { model, temperature, maxTokens, onChunk, onError, onComplete } = options;

    // Set up event listeners for chunks, errors, and completion
    const unlistenChunk = await listen<StreamChunk>('llm-stream-chunk', (event) => {
      onChunk?.(event.payload);
    });

    const unlistenError = await listen<string>('llm-stream-error', (event) => {
      onError?.(event.payload);
    });

    const unlistenComplete = await listen('llm-stream-complete', () => {
      onComplete?.();
    });

    // Cleanup function that detaches all stream listeners
    const unlisten = () => {
      unlistenChunk();
      unlistenError();
      unlistenComplete();
    };

    // Start streaming; if the backend command fails, detach the listeners
    // before rethrowing so they do not leak
    try {
      await invoke('stream_message', {
        provider,
        model,
        messages,
        temperature,
        maxTokens,
      });
    } catch (error) {
      unlisten();
      throw error;
    }

    // Return the cleanup function
    return unlisten;
  }
}
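
// Illustrative usage of streamMessage (a sketch; the callback bodies and the
// `provider`/`messages` values are assumptions, not part of this module):
//
//   const stopListening = await LLMService.streamMessage(provider, messages, {
//     onChunk: (chunk) => renderChunk(chunk),        // renderChunk is hypothetical
//     onError: (message) => console.error(message),
//     onComplete: () => console.log('stream finished'),
//   });
//
//   // Later, e.g. on unmount or before starting a new stream, detach the
//   // global event listeners:
//   stopListening();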