// examples/js/providers/local-ollama.ts
/**
 * Local Ollama Provider Example
 *
 * Demonstrates using local Ollama models with PraisonAI.
 *
 * Setup:
 * 1. Install Ollama: https://ollama.ai
 * 2. Pull a model: ollama pull llama3.2
 * 3. Run this example
 *
 * Optional env var:
 * - OLLAMA_BASE_URL (default: http://localhost:11434)
 */

import { Agent } from 'praisonai';

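/**
 * Optional helper (a sketch, not part of the PraisonAI API): checks that the
 * local Ollama server is reachable by listing its pulled models via Ollama's
 * GET /api/tags endpoint. Assumes Node 18+ so the global fetch is available;
 * the response shape ({ models: [{ name }] }) follows Ollama's documented API.
 */
async function listLocalModels(baseUrl: string): Promise<string[]> {
  const res = await fetch(`${baseUrl}/api/tags`);
  if (!res.ok) {
    throw new Error(`Ollama server responded with HTTP ${res.status}`);
  }
  const data = await res.json() as { models?: { name: string }[] };
  return (data.models ?? []).map((m) => m.name);
}
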
async function main() {
  // Ollama with llama3.2
  const ollamaAgent = new Agent({
    name: 'LocalLlama',
    instructions: 'You are a helpful local AI assistant running on Ollama.',
    llm: 'ollama/llama3.2',
    llmConfig: {
      baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434'
    }
  });

  console.log('Testing Ollama local model...\n');

  try {
    const response = await ollamaAgent.chat('Hello! What model are you running?');
    console.log('Response:', response);
  } catch (error: any) {
    console.log('Note: Ollama not running or model not pulled.');
    console.log('Start Ollama and run: ollama pull llama3.2');
    console.log('Error:', error.message);
  }
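
  // Optional (sketch): use the listLocalModels helper defined above to show which
  // models this Ollama server has pulled. Purely illustrative; remove if not needed.
  try {
    const models = await listLocalModels(process.env.OLLAMA_BASE_URL || 'http://localhost:11434');
    console.log('\nModels available locally:', models.join(', ') || '(none pulled yet)');
  } catch {
    console.log('\nCould not reach the Ollama server to list local models.');
  }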
}

main().catch(console.error);