# tool_context.py
1 # Copyright (c) 2024-2026 Tencent Zhuque Lab. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 # Requirement: Any integration or derivative work must explicitly attribute 16 # Tencent Zhuque Lab (https://github.com/Tencent/AI-Infra-Guard) in its 17 # documentation or user interface, as detailed in the NOTICE file. 18 19 """ 20 工具执行上下文 - 提供工具运行所需的环境信息 21 """ 22 from typing import List, Dict, Any, Optional, TYPE_CHECKING 23 24 from core.agent_adapter.adapter import AIProviderClient, ProviderOptions 25 26 if TYPE_CHECKING: # pragma: no cover 27 from tools.dispatcher import ToolDispatcher 28 from utils.llm import LLM 29 30 31 class ToolContext: 32 """工具执行上下文,包含历史记录、LLM实例等信息""" 33 34 def __init__( 35 self, 36 llm: LLM = None, 37 history: List[Dict[str, str]] = [], 38 agent_name: str = "Agent", 39 iteration: int = 0, 40 specialized_llms: Optional[Dict[str, LLM]] = None, 41 folder: Optional[str] = None, 42 agent_provider: Optional[ProviderOptions] = None, 43 language: str = "zh" 44 ): 45 """ 46 初始化工具上下文 47 """ 48 self.llm = llm 49 self.history = history 50 self.agent_name = agent_name 51 self.iteration = iteration 52 self.specialized_llms = specialized_llms or {} 53 self.folder = folder 54 self.client = AIProviderClient() 55 self.agent_provider: ProviderOptions = agent_provider 56 self.language = language 57 58 def get_llm(self, purpose: str = "default") -> LLM: 59 """ 60 根据用途获取合适的LLM 61 62 Args: 63 purpose: LLM用途,如 "thinking", 
"coding", "default" 64 65 Returns: 66 LLM实例 67 """ 68 if purpose in self.specialized_llms: 69 return self.specialized_llms[purpose] 70 return self.llm 71 72 def get_recent_history(self, n: int = 5) -> List[Dict[str, str]]: 73 """ 74 获取最近的n条历史记录 75 76 Args: 77 n: 历史记录条数 78 79 Returns: 80 历史记录列表 81 """ 82 return self.history[-n:] if len(self.history) > n else self.history 83 84 def call_provider(self, prompt: str): 85 if self.agent_provider is None: 86 raise ValueError("Agent provider not set") 87 return self.client.call_provider(self.agent_provider, prompt) 88 89 def call_llm( 90 self, 91 prompt: str, 92 purpose: str = "default", 93 system_prompt: Optional[str] = None, 94 use_history: bool = False 95 ) -> str: 96 """ 97 调用LLM获取响应 98 99 Args: 100 prompt: 用户提示 101 purpose: LLM用途 102 system_prompt: 系统提示(可选) 103 use_history: 是否使用历史记录 104 105 Returns: 106 LLM响应内容 107 """ 108 llm = self.get_llm(purpose) 109 110 messages = [] 111 112 # 添加系统提示 113 if system_prompt: 114 messages.append({"role": "system", "content": system_prompt}) 115 116 # 添加历史记录(如果需要) 117 if use_history: 118 messages.extend(self.history[1:]) 119 120 # 添加当前提示 121 messages.append({"role": "user", "content": prompt}) 122 123 return llm.chat(messages) 124 125 def call_llm_messages( 126 self, 127 messages, 128 purpose: str = "default", 129 ) -> str: 130 llm = self.get_llm(purpose) 131 return llm.chat(messages) 132 133 async def call_subagent(self, description: str, template: str, prompt: str, stage_id: str, 134 language: str = "zh", repo_dir: str | None = None, context_data: dict | None = None): 135 # Lazy imports to avoid circular dependency 136 from core.base_agent import run_agent 137 from tools.task.task import load_agent_prompt, get_all_agents 138 agent_instruction = load_agent_prompt(template) 139 140 if agent_instruction is None: 141 available = get_all_agents() 142 available_names = [a['name'] for a in available] 143 144 return { 145 "success": False, 146 "error": f"Unknown agent type: {template}. 
Available agents: {', '.join(available_names) if available_names else 'none'}" 147 } 148 149 instruction = load_agent_prompt(template)["raw"] 150 result = await run_agent(description, instruction, self.llm, prompt, stage_id, self.specialized_llms, 151 self.agent_provider, language, 152 repo_dir, 153 context_data) 154 return result