# google.py
import os

from typing import Dict, List, Optional, Set

# NOTE(review): `os`, `logger`, `OpenAI`, and `ChatOpenAI` appear unused in this
# module — confirm against the rest of the package before removing.
from chainlit import logger
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
from openai import OpenAI

from .base import LLMProvider
from ..capabilities import ModelCapability


class GoogleProvider(LLMProvider):
    """LLM provider for Google's Gemini models, via ``langchain-google-genai``."""

    # Capability set shared by every Gemini model this provider exposes.
    # Image-generation-capable models extend this set below.
    _BASE_CAPS: Set[ModelCapability] = {
        ModelCapability.TEXT_TO_TEXT,
        ModelCapability.IMAGE_TO_TEXT,
        ModelCapability.TOOL_CALLING,
    }

    def create_model(self, name: str, model: str, tools: Optional[List] = None, **kwargs) -> BaseChatModel:
        """Instantiate a Gemini chat model, optionally binding tools.

        Args:
            name: Display/run name assigned to the model instance.
            model: Gemini model identifier (see :meth:`list_models`).
            tools: Optional list of tools to bind for function calling.
            **kwargs: Extra keyword arguments forwarded to
                ``ChatGoogleGenerativeAI`` (e.g. ``temperature``).

        Returns:
            The chat model, wrapped by ``bind_tools`` when tools are given.
        """
        llm = ChatGoogleGenerativeAI(
            name=name,
            model=model,
            max_tokens=None,  # no explicit output cap; defer to the model default
            **kwargs,
        )
        # Only wrap when tools were actually supplied; bind_tools returns a
        # Runnable rather than the bare chat model.
        return llm.bind_tools(tools) if tools else llm

    def list_models(self) -> List[str]:
        """Return the supported Gemini model ids.

        Derived from :attr:`capabilities` so the two can never drift apart
        (previously this list was duplicated by hand).
        """
        return list(self.capabilities)

    @property
    def name(self) -> str:
        """Stable provider identifier used for registry/config lookup."""
        return "google"

    @property
    def capabilities(self) -> Dict[str, Set[ModelCapability]]:
        """Map each supported model id to a fresh copy of its capability set.

        Copies are returned so callers may mutate a set without corrupting
        the shared class-level constant.
        """
        return {
            "gemini-2.5-pro": set(self._BASE_CAPS),
            "gemini-2.5-flash": set(self._BASE_CAPS),
            "gemini-2.5-flash-preview-09-2025": set(self._BASE_CAPS),
            "gemini-2.5-flash-lite": set(self._BASE_CAPS),
            # The image model can additionally generate images.
            "gemini-2.5-flash-image": self._BASE_CAPS | {ModelCapability.TEXT_TO_IMAGE},
        }