base.py
# Copyright (c) 2024-2026 Tencent Zhuque Lab. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requirement: Any integration or derivative work must explicitly attribute
# Tencent Zhuque Lab (https://github.com/Tencent/AI-Infra-Guard) in its
# documentation or user interface, as detailed in the NOTICE file.

import asyncio
from abc import abstractmethod
from typing import Optional, Tuple

from deepeval.models.base_model import DeepEvalBaseLLM


class BaseLLM(DeepEvalBaseLLM):
    """Abstract base for LLM backends used by the evaluation framework.

    Subclasses supply the concrete transport (remote API, local model, ...)
    by implementing the abstract methods below. Async generation throughput
    is bounded by an ``asyncio.Semaphore`` sized from ``max_concurrent``,
    which implementations are expected to acquire in ``a_generate``.
    """

    def __init__(self, model_name: str, base_url: str, api_key: str, max_concurrent: int):
        """Store connection settings and create the shared concurrency limiter.

        Args:
            model_name: Identifier of the model to query.
            base_url: Base URL of the model service endpoint.
            api_key: Credential used to authenticate requests.
            max_concurrent: Maximum number of in-flight async generations;
                must be non-negative (``asyncio.Semaphore`` raises
                ``ValueError`` otherwise).
        """
        # NOTE(review): DeepEvalBaseLLM.__init__ is deliberately not invoked
        # (it would call the still-abstract load_model); confirm subclasses
        # do not depend on any of its side effects.
        self.model_name = model_name
        self.base_url = base_url
        self.api_key = api_key
        self.max_concurrent = max_concurrent
        # Shared limiter that a_generate() implementations await under.
        self.semaphore = asyncio.Semaphore(max_concurrent)

    @abstractmethod
    def load_model(self, *args, **kwargs):
        """Load and return the model object responsible for scoring.

        Returns:
            A model object.
        """
        pass

    @abstractmethod
    def test_model_connection(self) -> Tuple[bool, str]:
        """Check whether the configured model is reachable and responsive.

        Returns:
            Tuple[bool, str]:
                - bool: True if the model is successfully connected, False otherwise.
                - str: Success message if connected, or detailed error
                  information if the connection failed.

        Example:
            >>> is_connected, message = test_model_connection(model)
            >>> print(f"Connected: {is_connected}, Message: {message}")
            Connected: True, Message: Model responded successfully in 200ms
        """
        pass

    @abstractmethod
    def generate(self, prompt: Optional[str] = None, messages: Optional[list] = None, *args, **kwargs) -> str:
        """Run the model synchronously and return its response.

        Args:
            prompt: Plain-text prompt; mutually exchangeable with ``messages``.
            messages: Chat-style message list; used instead of ``prompt``
                when provided.

        Returns:
            The model's response as a string.
        """
        pass

    @abstractmethod
    async def a_generate(self, prompt: Optional[str] = None, messages: Optional[list] = None, *args, **kwargs) -> str:
        """Run the model asynchronously and return its response.

        Implementations should perform their work inside
        ``async with self.semaphore`` to honor ``max_concurrent``.

        Returns:
            The model's response as a string.
        """
        # Abstract default: acquire and immediately release the limiter so a
        # subclass that awaits super().a_generate() still respects it. Real
        # work belongs in the subclass override.
        async with self.semaphore:
            pass

    @abstractmethod
    def get_model_name(self, *args, **kwargs) -> str:
        """Return a human-readable name identifying this model backend."""
        pass