diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py
index 9665f0291..2c824de2f 100644
--- a/src/chat/focus_chat/heartFC_chat.py
+++ b/src/chat/focus_chat/heartFC_chat.py
@@ -28,7 +28,6 @@ from src.chat.focus_chat.planners.action_manager import ActionManager
 from src.config.config import global_config
 from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
 from src.chat.focus_chat.hfc_version_manager import get_hfc_version
-from src.chat.focus_chat.info.structured_info import StructuredInfo
 from src.person_info.relationship_builder_manager import relationship_builder_manager


@@ -218,8 +217,6 @@ class HeartFChatting:
         else:
             logger.warning(f"{self.log_prefix} 没有注册任何处理器。这可能是由于配置错误或所有处理器都被禁用了。")

-
-
     async def start(self):
         """检查是否需要启动主循环,如果未激活则启动。"""
         logger.debug(f"{self.log_prefix} 开始启动 HeartFChatting")
@@ -400,8 +397,6 @@ class HeartFChatting:
             ("\n前处理器耗时: " + "; ".join(processor_time_strings)) if processor_time_strings else ""
         )

-
-
         logger.info(
             f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考,"
             f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, "
@@ -560,8 +555,6 @@ class HeartFChatting:

         return all_plan_info, processor_time_costs

-
-
     async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
         try:
             loop_start_time = time.time()
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index 532f19f3a..9ce289ba9 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -157,20 +157,13 @@ class DefaultReplyer:
             fallback_config = global_config.model.replyer_1.copy()
             fallback_config.setdefault("weight", 1.0)
             self.express_model_configs = [fallback_config]
-
+
         self.chat_stream = chat_stream
         self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
-
+
         self.heart_fc_sender = HeartFCSender()
         self.memory_activator = MemoryActivator()
-        self.tool_executor = ToolExecutor(
-            chat_id=self.chat_stream.stream_id,
-            enable_cache=True,
-            cache_ttl=3
-        )
-
-
-
+        self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)

     def _select_weighted_model_config(self) -> Dict[str, Any]:
         """使用加权随机选择来挑选一个模型配置"""
@@ -405,48 +398,45 @@ class DefaultReplyer:

     async def build_tool_info(self, reply_data=None, chat_history=None):
         """构建工具信息块
-
+
         Args:
             reply_data: 回复数据,包含要回复的消息内容
             chat_history: 聊天历史
-
+
         Returns:
             str: 工具信息字符串
         """
         if not reply_data:
             return ""
-
+
         reply_to = reply_data.get("reply_to", "")
         sender, text = self._parse_reply_target(reply_to)
-
+
         if not text:
             return ""
-
+
         try:
             # 使用工具执行器获取信息
             tool_results = await self.tool_executor.execute_from_chat_message(
-                sender = sender,
-                target_message=text,
-                chat_history=chat_history,
-                return_details=False
+                sender=sender, target_message=text, chat_history=chat_history, return_details=False
             )
-
+
             if tool_results:
                 tool_info_str = "以下是你通过工具获取到的实时信息:\n"
                 for tool_result in tool_results:
                     tool_name = tool_result.get("tool_name", "unknown")
                     content = tool_result.get("content", "")
                     result_type = tool_result.get("type", "info")
-
+
                     tool_info_str += f"- 【{tool_name}】{result_type}: {content}\n"
-
+
                 tool_info_str += "以上是你获取到的实时信息,请在回复时参考这些信息。"
                 logger.info(f"{self.log_prefix} 获取到 {len(tool_results)} 个工具结果")
                 return tool_info_str
             else:
                 logger.debug(f"{self.log_prefix} 未获取到任何工具结果")
                 return ""
-
+
         except Exception as e:
             logger.error(f"{self.log_prefix} 工具信息获取失败: {e}")
             return ""
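Reviewer note on the DefaultReplyer changes above: build_tool_info() now folds ToolExecutor output into a single text block that the replyer can include in its prompt. A minimal, self-contained sketch of that formatting step follows; the sample entry and tool name are illustrative only, real entries come from ToolExecutor.execute_from_chat_message().

    # Illustrative input; real dicts carry whatever the executed tools returned.
    tool_results = [{"tool_name": "example_tool", "type": "info", "content": "..."}]

    tool_info_str = "以下是你通过工具获取到的实时信息:\n"
    for tool_result in tool_results:
        tool_name = tool_result.get("tool_name", "unknown")
        content = tool_result.get("content", "")
        result_type = tool_result.get("type", "info")
        tool_info_str += f"- 【{tool_name}】{result_type}: {content}\n"
    tool_info_str += "以上是你获取到的实时信息,请在回复时参考这些信息。"
    print(tool_info_str)  # the block build_tool_info() returns for prompt building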
diff --git a/src/tools/tool_executor.py b/src/tools/tool_executor.py
index a46fdc4cd..6f2ecc651 100644
--- a/src/tools/tool_executor.py
+++ b/src/tools/tool_executor.py
@@ -2,7 +2,6 @@ from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 from src.common.logger import get_logger
-from src.individuality.individuality import get_individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.tools.tool_use import ToolUser
 from src.chat.utils.json_utils import process_llm_tool_calls
@@ -30,13 +29,13 @@ If you need to use a tool, please directly call the corresponding tool function.

 class ToolExecutor:
     """独立的工具执行器组件
-
+
     可以直接输入聊天消息内容,自动判断并执行相应的工具,返回结构化的工具执行结果。
     """
-
+
     def __init__(self, chat_id: str = None, enable_cache: bool = True, cache_ttl: int = 3):
         """初始化工具执行器
-
+
         Args:
             executor_id: 执行器标识符,用于日志记录
             enable_cache: 是否启用缓存机制
@@ -48,41 +47,37 @@ class ToolExecutor:
             model=global_config.model.focus_tool_use,
             request_type="tool_executor",
         )
-
+
         # 初始化工具实例
         self.tool_instance = ToolUser()
-
+
         # 缓存配置
         self.enable_cache = enable_cache
         self.cache_ttl = cache_ttl
         self.tool_cache = {}  # 格式: {cache_key: {"result": result, "ttl": ttl, "timestamp": timestamp}}
-
+
         logger.info(f"{self.log_prefix}工具执行器初始化完成,缓存{'启用' if enable_cache else '禁用'},TTL={cache_ttl}")

     async def execute_from_chat_message(
-        self,
-        target_message: str,
-        chat_history: list[str],
-        sender: str,
-        return_details: bool = False
+        self, target_message: str, chat_history: list[str], sender: str, return_details: bool = False
     ) -> List[Dict] | Tuple[List[Dict], List[str], str]:
         """从聊天消息执行工具
-
+
         Args:
             target_message: 目标消息内容
             chat_history: 聊天历史
             sender: 发送者
             return_details: 是否返回详细信息(使用的工具列表和提示词)
-
+
         Returns:
             如果return_details为False: List[Dict] - 工具执行结果列表
             如果return_details为True: Tuple[List[Dict], List[str], str] - (结果列表, 使用的工具, 提示词)
         """
-
+
         # 首先检查缓存
         cache_key = self._generate_cache_key(target_message, chat_history, sender)
         cached_result = self._get_from_cache(cache_key)
-
+
         if cached_result:
             logger.info(f"{self.log_prefix}使用缓存结果,跳过工具执行")
             if return_details:
@@ -91,16 +86,16 @@ class ToolExecutor:
                 return cached_result, used_tools, "使用缓存结果"
             else:
                 return cached_result
-
+
         # 缓存未命中,执行工具调用
         # 获取可用工具
         tools = self.tool_instance._define_tools()
-
+
         # 获取当前时间
         time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
-
+
         bot_name = global_config.bot.nickname
-
+
         # 构建工具调用提示词
         prompt = await global_prompt_manager.format_prompt(
             "tool_executor_prompt",
@@ -110,31 +105,28 @@ class ToolExecutor:
             bot_name=bot_name,
             time_now=time_now,
         )
-
+
         logger.debug(f"{self.log_prefix}开始LLM工具调用分析")
-
+
         # 调用LLM进行工具决策
-        response, other_info = await self.llm_model.generate_response_async(
-            prompt=prompt,
-            tools=tools
-        )
-
+        response, other_info = await self.llm_model.generate_response_async(prompt=prompt, tools=tools)
+
         # 解析LLM响应
         if len(other_info) == 3:
             reasoning_content, model_name, tool_calls = other_info
         else:
             reasoning_content, model_name = other_info
             tool_calls = None
-
+
         # 执行工具调用
         tool_results, used_tools = await self._execute_tool_calls(tool_calls)
-
+
         # 缓存结果
         if tool_results:
             self._set_cache(cache_key, tool_results)
-
+
         logger.info(f"{self.log_prefix}工具执行完成,共执行{len(used_tools)}个工具: {used_tools}")
-
+
         if return_details:
             return tool_results, used_tools, prompt
         else:
@@ -142,44 +134,44 @@ class ToolExecutor:

     async def _execute_tool_calls(self, tool_calls) -> Tuple[List[Dict], List[str]]:
         """执行工具调用
-
+
         Args:
             tool_calls: LLM返回的工具调用列表
-
+
         Returns:
             Tuple[List[Dict], List[str]]: (工具执行结果列表, 使用的工具名称列表)
         """
         tool_results = []
         used_tools = []
-
+
         if not tool_calls:
             logger.debug(f"{self.log_prefix}无需执行工具")
             return tool_results, used_tools
-
+
         logger.info(f"{self.log_prefix}开始执行工具调用: {tool_calls}")
-
+
         # 处理工具调用
         success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
-
+
         if not success:
             logger.error(f"{self.log_prefix}工具调用解析失败: {error_msg}")
             return tool_results, used_tools
-
+
         if not valid_tool_calls:
             logger.debug(f"{self.log_prefix}无有效工具调用")
             return tool_results, used_tools
-
+
         # 执行每个工具调用
         for tool_call in valid_tool_calls:
             try:
                 tool_name = tool_call.get("name", "unknown_tool")
                 used_tools.append(tool_name)
-
+
                 logger.debug(f"{self.log_prefix}执行工具: {tool_name}")
-
+
                 # 执行工具
                 result = await self.tool_instance._execute_tool_call(tool_call)
-
+
                 if result:
                     tool_info = {
                         "type": result.get("type", "unknown_type"),
@@ -189,10 +181,10 @@ class ToolExecutor:
                         "timestamp": time.time(),
                     }
                     tool_results.append(tool_info)
-
+
                     logger.info(f"{self.log_prefix}工具{tool_name}执行成功,类型: {tool_info['type']}")
                     logger.debug(f"{self.log_prefix}工具{tool_name}结果内容: {tool_info['content'][:200]}...")
-
+
             except Exception as e:
                 logger.error(f"{self.log_prefix}工具{tool_name}执行失败: {e}")
                 # 添加错误信息到结果中
@@ -204,85 +196,82 @@ class ToolExecutor:
                     "timestamp": time.time(),
                 }
                 tool_results.append(error_info)
-
+
         return tool_results, used_tools

     def _generate_cache_key(self, target_message: str, chat_history: list[str], sender: str) -> str:
         """生成缓存键
-
+
         Args:
             target_message: 目标消息内容
             chat_history: 聊天历史
             sender: 发送者
-
+
         Returns:
             str: 缓存键
         """
         import hashlib
+
         # 使用消息内容和群聊状态生成唯一缓存键
         content = f"{target_message}_{chat_history}_{sender}"
         return hashlib.md5(content.encode()).hexdigest()
-
+
     def _get_from_cache(self, cache_key: str) -> Optional[List[Dict]]:
         """从缓存获取结果
-
+
         Args:
             cache_key: 缓存键
-
+
         Returns:
             Optional[List[Dict]]: 缓存的结果,如果不存在或过期则返回None
         """
         if not self.enable_cache or cache_key not in self.tool_cache:
             return None
-
+
         cache_item = self.tool_cache[cache_key]
         if cache_item["ttl"] <= 0:
             # 缓存过期,删除
             del self.tool_cache[cache_key]
             logger.debug(f"{self.log_prefix}缓存过期,删除缓存键: {cache_key}")
             return None
-
+
         # 减少TTL
         cache_item["ttl"] -= 1
         logger.debug(f"{self.log_prefix}使用缓存结果,剩余TTL: {cache_item['ttl']}")
         return cache_item["result"]
-
+
     def _set_cache(self, cache_key: str, result: List[Dict]):
         """设置缓存
-
+
         Args:
             cache_key: 缓存键
             result: 要缓存的结果
         """
         if not self.enable_cache:
             return
-
-        self.tool_cache[cache_key] = {
-            "result": result,
-            "ttl": self.cache_ttl,
-            "timestamp": time.time()
-        }
+
+        self.tool_cache[cache_key] = {"result": result, "ttl": self.cache_ttl, "timestamp": time.time()}
         logger.debug(f"{self.log_prefix}设置缓存,TTL: {self.cache_ttl}")
-
+
     def _cleanup_expired_cache(self):
         """清理过期的缓存"""
         if not self.enable_cache:
             return
-
+
         expired_keys = []
         for cache_key, cache_item in self.tool_cache.items():
             if cache_item["ttl"] <= 0:
                 expired_keys.append(cache_key)
-
+
         for key in expired_keys:
             del self.tool_cache[key]
-
+
         if expired_keys:
             logger.debug(f"{self.log_prefix}清理了{len(expired_keys)}个过期缓存")

     def get_available_tools(self) -> List[str]:
         """获取可用工具列表
-
+
         Returns:
             List[str]: 可用工具名称列表
         """
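Reviewer note on the caching hunks above: despite the stored timestamp, cache_ttl is a hit budget rather than a wall-clock timeout. _get_from_cache() decrements ttl on every hit, and an entry is dropped once ttl reaches zero, either on the next lookup or via _cleanup_expired_cache(). A self-contained sketch of the same bookkeeping, using only the standard dict type and hypothetical names:

    cache = {}

    def set_entry(key, result, ttl=3):
        # mirrors _set_cache: store the result with a remaining-use counter
        cache[key] = {"result": result, "ttl": ttl}

    def get_entry(key):
        # mirrors _get_from_cache: a zero counter means the entry is exhausted
        item = cache.get(key)
        if item is None:
            return None
        if item["ttl"] <= 0:
            del cache[key]
            return None
        item["ttl"] -= 1  # each hit consumes one use
        return item["result"]

    set_entry("k", ["cached tool results"], ttl=3)
    print([get_entry("k") for _ in range(5)])  # three hits, then None twice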
"name": tool_name, - "arguments": tool_args - } - + tool_call = {"name": tool_name, "arguments": tool_args} + logger.info(f"{self.log_prefix}直接执行工具: {tool_name}") - + result = await self.tool_instance._execute_tool_call(tool_call) - + if result: tool_info = { "type": result.get("type", "unknown_type"), @@ -325,10 +308,10 @@ class ToolExecutor: } logger.info(f"{self.log_prefix}直接工具执行成功: {tool_name}") return tool_info - + except Exception as e: logger.error(f"{self.log_prefix}直接工具执行失败 {tool_name}: {e}") - + return None def clear_cache(self): @@ -337,36 +320,36 @@ class ToolExecutor: cache_count = len(self.tool_cache) self.tool_cache.clear() logger.info(f"{self.log_prefix}清空了{cache_count}个缓存项") - + def get_cache_status(self) -> Dict: """获取缓存状态信息 - + Returns: Dict: 包含缓存统计信息的字典 """ if not self.enable_cache: return {"enabled": False, "cache_count": 0} - + # 清理过期缓存 self._cleanup_expired_cache() - + total_count = len(self.tool_cache) ttl_distribution = {} - + for cache_item in self.tool_cache.values(): ttl = cache_item["ttl"] ttl_distribution[ttl] = ttl_distribution.get(ttl, 0) + 1 - + return { "enabled": True, "cache_count": total_count, "cache_ttl": self.cache_ttl, - "ttl_distribution": ttl_distribution + "ttl_distribution": ttl_distribution, } - + def set_cache_config(self, enable_cache: bool = None, cache_ttl: int = None): """动态修改缓存配置 - + Args: enable_cache: 是否启用缓存 cache_ttl: 缓存TTL @@ -374,7 +357,7 @@ class ToolExecutor: if enable_cache is not None: self.enable_cache = enable_cache logger.info(f"{self.log_prefix}缓存状态修改为: {'启用' if enable_cache else '禁用'}") - + if cache_ttl is not None and cache_ttl > 0: self.cache_ttl = cache_ttl logger.info(f"{self.log_prefix}缓存TTL修改为: {cache_ttl}") @@ -418,4 +401,4 @@ available_tools = executor.get_available_tools() cache_status = executor.get_cache_status() # 查看缓存状态 executor.clear_cache() # 清空缓存 executor.set_cache_config(cache_ttl=5) # 动态修改缓存配置 -""" \ No newline at end of file +"""