From 1f91967d2d6d13b2a09fa8b96ab4c05bc1f57c9a Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Mon, 11 Aug 2025 13:18:17 +0800
Subject: [PATCH] remove: remove the willing system, remove reply2, remove
 energy value; replace reply_to with message
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 docs/model_configuration_guide.md       |   4 +-
 src/chat/chat_loop/heartFC_chat.py      | 192 +++---------
 src/chat/express/expression_learner.py  |   2 +-
 src/chat/planner_actions/planner.py     |  60 ++--
 src/chat/replyer/default_generator.py   | 120 +++----
 src/chat/replyer/replyer_manager.py     |   2 -
 src/chat/willing/mode_classical.py      |  60 ----
 src/chat/willing/mode_custom.py         |  23 --
 src/chat/willing/mode_mxp.py            | 296 ------------------
 src/chat/willing/willing_manager.py     | 180 -----------
 src/config/api_ada_configs.py           |   5 +-
 src/main.py                             |   8 -
 src/mais4u/mai_think.py                 |   2 +-
 .../mais4u_chat/s4u_stream_generator.py  |  18 +-
 src/plugin_system/apis/generator_api.py |  17 +-
 src/plugin_system/apis/send_api.py      |  50 +--
 template/model_config_template.toml     |   9 +-
 17 files changed, 155 insertions(+), 893 deletions(-)
 delete mode 100644 src/chat/willing/mode_classical.py
 delete mode 100644 src/chat/willing/mode_custom.py
 delete mode 100644 src/chat/willing/mode_mxp.py
 delete mode 100644 src/chat/willing/willing_manager.py

diff --git a/docs/model_configuration_guide.md b/docs/model_configuration_guide.md
index d5afbd296..1d83bff99 100644
--- a/docs/model_configuration_guide.md
+++ b/docs/model_configuration_guide.md
@@ -166,10 +166,10 @@ temperature = 0.7
 max_tokens = 800
 ```
 
-### replyer_1 - 主要回复模型
+### replyer - 主要回复模型
 首要回复模型,也用于表达器和表达方式学习:
 ```toml
-[model_task_config.replyer_1]
+[model_task_config.replyer]
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.2
 max_tokens = 800
diff --git a/src/chat/chat_loop/heartFC_chat.py b/src/chat/chat_loop/heartFC_chat.py
index f416bcecb..f3d9524e1 100644
--- a/src/chat/chat_loop/heartFC_chat.py
+++ b/src/chat/chat_loop/heartFC_chat.py
@@ -11,7 +11,6 @@ from src.common.logger import get_logger
 from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
 from src.chat.utils.prompt_builder import global_prompt_manager
 from src.chat.utils.timer_calculator import Timer
-from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_with_chat
 from src.chat.planner_actions.planner import ActionPlanner
 from src.chat.planner_actions.action_modifier import ActionModifier
 from src.chat.planner_actions.action_manager import ActionManager
@@ -22,9 +21,9 @@ from src.person_info.person_info import get_person_info_manager
 from src.plugin_system.base.component_types import ActionInfo, ChatMode, EventType
 from src.plugin_system.core import events_manager
 from src.plugin_system.apis import generator_api, send_api, message_api, database_api
-from src.chat.willing.willing_manager import get_willing_manager
 from src.mais4u.mai_think import mai_thinking_manager
 from src.mais4u.constant_s4u import ENABLE_S4U
+import math
 
 # no_reply逻辑已集成到heartFC_chat.py中,不再需要导入
 from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
@@ -99,7 +98,6 @@ class HeartFChatting:
         # 循环控制内部状态
         self.running: bool = False
         self._loop_task: Optional[asyncio.Task] = None  # 主循环任务
-        self._energy_task: Optional[asyncio.Task] = None
 
         # 添加循环信息管理相关的属性
         self.history_loop: List[CycleDetail] = []
@@ -110,12 +108,6 @@ class HeartFChatting:
         self.plan_timeout_count = 0
 
         self.last_read_time = time.time() - 1
-
-        self.willing_manager = get_willing_manager()
-
-        logger.info(f"{self.log_prefix} HeartFChatting 初始化完成")
-
-
-        self.energy_value = 5
         self.focus_energy = 1
         self.no_reply_consecutive = 0
@@ -134,9 +126,6 @@ class HeartFChatting:
 
         # 标记为活动状态,防止重复启动
         self.running = True
-        self._energy_task = asyncio.create_task(self._energy_loop())
-        self._energy_task.add_done_callback(self._handle_energy_completion)
-
         self._loop_task = asyncio.create_task(self._main_chat_loop())
         self._loop_task.add_done_callback(self._handle_loop_completion)
         logger.info(f"{self.log_prefix} HeartFChatting 启动完成")
@@ -172,19 +161,6 @@ class HeartFChatting:
             self._current_cycle_detail.timers = cycle_timers
             self._current_cycle_detail.end_time = time.time()
 
-    def _handle_energy_completion(self, task: asyncio.Task):
-        if exception := task.exception():
-            logger.error(f"{self.log_prefix} HeartFChatting: 能量循环异常: {exception}")
-            logger.error(traceback.format_exc())
-        else:
-            logger.info(f"{self.log_prefix} HeartFChatting: 能量循环完成")
-
-    async def _energy_loop(self):
-        while self.running:
-            await asyncio.sleep(12)
-            self.energy_value -= 0.5
-            self.energy_value = max(self.energy_value, 0.3)
-
     def print_cycle_info(self, cycle_timers):
         # 记录循环信息和计时器结果
         timer_strings = []
@@ -250,10 +226,8 @@ class HeartFChatting:
         """
         new_message_count = len(new_message)
-
-        # talk_frequency = global_config.chat.get_current_talk_frequency(self.stream_id)
         modified_exit_count_threshold = self.focus_energy / global_config.chat.focus_value
-
+        modified_exit_interest_threshold = 3 / global_config.chat.focus_value
         total_interest = 0.0
         for msg_dict in new_message:
             interest_value = msg_dict.get("interest_value", 0.0)
 
             total_interest += interest_value
 
         if new_message_count >= modified_exit_count_threshold:
-            # 记录兴趣度到列表
-
-            self.recent_interest_records.append(total_interest)
-
             logger.info(
                 f"{self.log_prefix} 累计消息数量达到{new_message_count}条(>{modified_exit_count_threshold:.1f}),结束等待"
             )
-            logger.info(self.last_read_time)
-            logger.info(new_message)
+            # logger.info(self.last_read_time)
+            # logger.info(new_message)
 
             return True,total_interest/new_message_count
 
         # 检查累计兴趣值
@@ -280,12 +250,11 @@ class HeartFChatting:
             logger.info(f"{self.log_prefix} breaking形式当前累计兴趣值: {total_interest:.2f}, 专注度: {global_config.chat.focus_value:.1f}")
             self._last_accumulated_interest = total_interest
 
-        if total_interest >= 3 / global_config.chat.focus_value:
+        if total_interest >= modified_exit_interest_threshold:
             # 记录兴趣度到列表
             self.recent_interest_records.append(total_interest)
-
             logger.info(
-                f"{self.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{3 / global_config.chat.focus_value}),结束等待"
+                f"{self.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{modified_exit_interest_threshold:.1f}),结束等待"
             )
 
             return True,total_interest/new_message_count
@@ -314,8 +283,6 @@ class HeartFChatting:
                 should_process,interest_value = await self._should_process_messages(recent_messages_dict)
 
                 if should_process:
-                    # earliest_message_data = recent_messages_dict[0]
-                    # self.last_read_time = earliest_message_data.get("time")
                     self.last_read_time = time.time()
 
                     await self._observe(interest_value = interest_value)
             else:
                 # Normal模式:消息数量不足,等待
                 await asyncio.sleep(0.5)
                 return True
-        return True
-
-    async def build_reply_to_str(self, message_data: dict):
-        person_info_manager = get_person_info_manager()
-
-        # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
-        platform = message_data.get("chat_info_platform")
-        if platform is None:
-            platform = getattr(self.chat_stream, "platform", "unknown")
-
-        person_id = person_info_manager.get_person_id(
-            platform,  # type: ignore
-            message_data.get("user_id"),  # type: ignore
-        )
-        person_name = await person_info_manager.get_value(person_id, "person_name")
-        return f"{person_name}:{message_data.get('processed_plain_text')}"
-
     async def _send_and_store_reply(
         self,
         response_set,
-        reply_to_str,
         loop_start_time,
         action_message,
         cycle_timers: Dict[str, float],
         thinking_id,
         actions,
     ) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
+
         with Timer("回复发送", cycle_timers):
-            reply_text = await self._send_response(response_set, reply_to_str, loop_start_time, action_message)
+            reply_text = await self._send_response(response_set, loop_start_time, action_message)
 
-        # 存储reply action信息
+        # 存储reply action信息
         person_info_manager = get_person_info_manager()
 
         # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
@@ -375,7 +326,7 @@ class HeartFChatting:
             action_prompt_display=action_prompt_display,
             action_done=True,
             thinking_id=thinking_id,
-            action_data={"reply_text": reply_text, "reply_to": reply_to_str},
+            action_data={"reply_text": reply_text},
             action_name="reply",
         )
@@ -398,12 +349,7 @@ class HeartFChatting:
         action_type = "no_action"
 
         reply_text = ""  # 初始化reply_text变量,避免UnboundLocalError
-        reply_to_str = ""  # 初始化reply_to_str变量
-
-        # 根据interest_value计算概率,决定使用哪种planner模式
-        # interest_value越高,越倾向于使用Normal模式
-        import random
-        import math
 
         # 使用sigmoid函数将interest_value转换为概率
         # 当interest_value为0时,概率接近0(使用Focus模式)
@@ -469,13 +415,6 @@ class HeartFChatting:
                 available_actions=available_actions,
             )
 
-            # action_result: Dict[str, Any] = plan_result.get("action_result", {})  # type: ignore
-            # action_type, action_data, reasoning, is_parallel = (
-            #     action_result.get("action_type", "error"),
-            #     action_result.get("action_data", {}),
-            #     action_result.get("reasoning", "未提供理由"),
-            #     action_result.get("is_parallel", True),
-            # )
 
         # 3. 并行执行所有动作
@@ -522,32 +461,26 @@ class HeartFChatting:
                         "command": command
                     }
                 else:
-                    # 执行回复动作
-                    reply_to_str = await self.build_reply_to_str(action_info["action_message"])
-
-                    # 生成回复
-
                     gather_timeout = global_config.chat.thinking_timeout
                     try:
-                        response_set = await asyncio.wait_for(
-                            self._generate_response(
-                                message_data=action_info["action_message"],
-                                available_actions=action_info["available_actions"],
-                                reply_to=reply_to_str,
-                                request_type="chat.replyer",
-                            ),
-                            timeout=gather_timeout
+                        success, response_set, _ = await generator_api.generate_reply(
+                            chat_stream=self.chat_stream,
+                            reply_message = action_info["action_message"],
+                            available_actions=available_actions,
+                            enable_tool=global_config.tool.enable_tool,
+                            request_type="chat.replyer",
+                            from_plugin=False,
                         )
-                    except asyncio.TimeoutError:
-                        logger.warning(
-                            f"{self.log_prefix} 并行执行:回复生成超时>{global_config.chat.thinking_timeout}s,已跳过"
-                        )
-                        return {
-                            "action_type": "reply",
-                            "success": False,
-                            "reply_text": "",
-                            "loop_info": None
-                        }
+
+                        if not success or not response_set:
+                            logger.info(f"对 {action_info['action_message'].get('processed_plain_text')} 的回复生成失败")
+                            return {
+                                "action_type": "reply",
+                                "success": False,
+                                "reply_text": "",
+                                "loop_info": None
+                            }
+
                     except asyncio.CancelledError:
                         logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")
                         return {
                             "action_type": "reply",
                             "success": False,
                             "reply_text": "",
                             "loop_info": None
                         }
 
-                    if not response_set:
-                        logger.warning(f"{self.log_prefix} 模型超时或生成回复内容为空")
-                        return {
-                            "action_type": "reply",
-                            "success": False,
-                            "reply_text": "",
-                            "loop_info": None
-                        }
-
                     loop_info, reply_text, cycle_timers_reply = await self._send_and_store_reply(
                         response_set,
-                        reply_to_str,
                         loop_start_time,
                         action_info["action_message"],
                         cycle_timers,
@@ -592,8 +515,7 @@ class HeartFChatting:
                     "error": str(e)
                 }
 
-        # 创建所有动作的后台任务
-        # print(actions)
+
        action_tasks = [asyncio.create_task(execute_action(action)) for action in actions]
 
@@ -762,42 +684,11 @@ class HeartFChatting:
             traceback.print_exc()
             return False, "", ""
 
-    async def _generate_response(
-        self,
-        message_data: dict,
-        available_actions: Optional[Dict[str, ActionInfo]],
-        reply_to: str,
-        request_type: str = "chat.replyer.normal",
-    ) -> Optional[list]:
-        """生成普通回复"""
-        try:
-            success, reply_set, _ = await generator_api.generate_reply(
-                chat_stream=self.chat_stream,
-                reply_to=reply_to,
-                available_actions=available_actions,
-                enable_tool=global_config.tool.enable_tool,
-                request_type=request_type,
-                from_plugin=False,
-            )
-
-            if not success or not reply_set:
-                logger.info(f"对 {message_data.get('processed_plain_text')} 的回复生成失败")
-                return None
-
-            return reply_set
-
-        except Exception as e:
-            logger.error(f"{self.log_prefix}回复生成出现错误:{str(e)} {traceback.format_exc()}")
-            return None
-
-    async def _send_response(self, reply_set, reply_to, thinking_start_time, message_data) -> str:
+    async def _send_response(self, reply_set, thinking_start_time, message_data) -> str:
         current_time = time.time()
         new_message_count = message_api.count_new_messages(
             chat_id=self.chat_stream.stream_id, start_time=thinking_start_time, end_time=current_time
         )
-        platform = message_data.get("user_platform", "")
-        user_id = message_data.get("user_id", "")
-        reply_to_platform_id = f"{platform}:{user_id}"
 
         need_reply = new_message_count >= random.randint(2, 4)
@@ -809,27 +700,20 @@ class HeartFChatting:
 
         for reply_seg in reply_set:
             data = reply_seg[1]
             if not first_replied:
-                if need_reply:
-                    await send_api.text_to_stream(
-                        text=data,
-                        stream_id=self.chat_stream.stream_id,
-                        reply_to=reply_to,
-                        reply_to_platform_id=reply_to_platform_id,
-                        typing=False,
-                    )
-                else:
-                    await send_api.text_to_stream(
-                        text=data,
-                        stream_id=self.chat_stream.stream_id,
-                        reply_to_platform_id=reply_to_platform_id,
-                        typing=False,
-                    )
+                await send_api.text_to_stream(
+                    text=data,
+                    stream_id=self.chat_stream.stream_id,
+                    reply_to_message = message_data,
+                    set_reply=need_reply,
+                    typing=False,
+                )
                 first_replied = True
             else:
                 await send_api.text_to_stream(
                     text=data,
                     stream_id=self.chat_stream.stream_id,
-                    reply_to_platform_id=reply_to_platform_id,
+                    reply_to_message = message_data,
+                    set_reply=need_reply,
                     typing=True,
                 )
             reply_text += data
diff --git a/src/chat/express/expression_learner.py b/src/chat/express/expression_learner.py
index 19ada5472..71bc2c355 100644
--- a/src/chat/express/expression_learner.py
+++ b/src/chat/express/expression_learner.py
@@ -81,7 +81,7 @@ def init_prompt() -> None:
 class ExpressionLearner:
     def __init__(self, chat_id: str) -> None:
         self.express_learn_model: LLMRequest = LLMRequest(
-            model_set=model_config.model_task_config.replyer_1, request_type="expressor.learner"
+            model_set=model_config.model_task_config.replyer, request_type="expressor.learner"
         )
         self.chat_id = chat_id
         self.chat_name = get_chat_manager().get_stream_name(chat_id) or chat_id
diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py
index a70395a44..f80f677fa 100644
--- a/src/chat/planner_actions/planner.py
+++ b/src/chat/planner_actions/planner.py
@@ -42,6 +42,19 @@ def init_prompt():
 {actions_before_now_block}
 
 {no_action_block}
+
+动作:reply
+动作描述:参与聊天回复,发送文本进行表达
+- 你想要闲聊或者随便附和
+- {mentioned_bonus}
+- 如果你刚刚进行了回复,不要对同一个话题重复回应
+- 不要回复自己发送的消息
+{{
+    "action": "reply",
+    "target_message_id":"触发action的消息id",
+    "reason":"回复的原因"
+}}
+
 {action_options_text}
 
 你必须从上面列出的可用action中选择一个,并说明触发action的消息id(不是消息原文)和选择该action的原因。
@@ -82,7 +95,6 @@ class ActionPlanner:
         self.max_plan_retries = 3
 
     def find_message_by_id(self, message_id: str, message_id_list: list) -> Optional[Dict[str, Any]]:
-        # sourcery skip: use-next
         """
         根据message_id从message_id_list中查找对应的原始消息
@@ -187,12 +199,11 @@ class ActionPlanner:
                     if key not in ["action", "reasoning"]:
                         action_data[key] = value
 
-            # 在FOCUS模式下,非no_reply动作需要target_message_id
+            # 非no_reply动作需要target_message_id
             if action != "no_reply":
                 if target_message_id := parsed_json.get("target_message_id"):
                     # 根据target_message_id查找原始消息
                     target_message = self.find_message_by_id(target_message_id, message_id_list)
-                    # target_message = None
 
                     # 如果获取的target_message为None,输出warning并重新plan
                     if target_message is None:
                         self.plan_retry_count += 1
@@ -205,7 +216,7 @@ class ActionPlanner:
                             self.plan_retry_count = 0  # 重置计数器
                         else:
                             # 递归重新plan
-                            return await self.plan(mode)
+                            return await self.plan(mode, loop_start_time, available_actions)
                     else:
                         # 成功获取到target_message,重置计数器
                         self.plan_retry_count = 0
@@ -213,9 +224,8 @@ class ActionPlanner:
                     logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id")
 
-            if action == "no_action":
-                reasoning = "normal决定不使用额外动作"
-            elif action != "no_reply" and action != "reply" and action not in current_available_actions:
+
+            if action != "no_reply" and action != "reply" and action not in current_available_actions:
                 logger.warning(
                     f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
                 )
@@ -301,7 +311,6 @@ class ActionPlanner:
             actions_before_now_block = f"你刚刚选择并执行过的action是:\n{actions_before_now_block}"
         if refresh_time:
             self.last_obs_time_mark = time.time()
-        # logger.info(f"{self.log_prefix}当前时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
 
         mentioned_bonus = ""
         if global_config.chat.mentioned_bot_inevitable_reply:
@@ -311,43 +320,19 @@ class ActionPlanner:
 
         if mode == ChatMode.FOCUS:
-            no_action_block = f"""重要说明:
-- 'no_reply' 表示只进行不进行回复,等待合适的回复时机
+            no_action_block = """重要说明:
+- 'no_reply' 表示不进行回复,等待合适的回复时机
 - 当你刚刚发送了消息,没有人回复时,选择no_reply
 - 当你一次发送了太多消息,为了避免打扰聊天节奏,选择no_reply
-
-动作:reply
-动作描述:参与聊天回复,发送文本进行表达
-- 你想要闲聊或者随便附
-- {mentioned_bonus}
-- 如果你刚刚进行了回复,不要对同一个话题重复回应
-- 不要回复自己发送的消息
-{{
-    "action": "reply",
-    "target_message_id":"触发action的消息id",
-    "reason":"回复的原因"
-}}
-
 """
         else:
-            no_action_block = f"""重要说明:
+            no_action_block = """重要说明:
 - 'reply' 表示只进行普通聊天回复,不执行任何额外动作
 - 其他action表示在普通回复的基础上,执行相应的额外动作
-
-动作:reply
-动作描述:参与聊天回复,发送文本进行表达
-- 你想要闲聊或者随便附
-- {mentioned_bonus}
-- 如果你刚刚进行了回复,不要对同一个话题重复回应
-- 不要回复自己发送的消息
-{{
-    "action": "reply",
-    "target_message_id":"触发action的消息id",
-    "reason":"回复的原因"
-}}"""
+"""
 
         chat_context_description = "你现在正在一个群聊中"
-        chat_target_name = None  # Only relevant for private
+        chat_target_name = None
         if not is_group_chat and chat_target_info:
             chat_target_name = (
                 chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or "对方"
             )
@@ -399,6 +384,7 @@ class ActionPlanner:
                 chat_content_block=chat_content_block,
                 actions_before_now_block=actions_before_now_block,
                 no_action_block=no_action_block,
+                mentioned_bonus=mentioned_bonus,
                 action_options_text=action_options_block,
                 moderation_prompt=moderation_prompt_block,
                 identity_block=identity_block,
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index c1a61fb08..fe023daf1 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -121,40 +121,11 @@ class DefaultReplyer:
     def __init__(
         self,
         chat_stream: ChatStream,
-        model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
-        request_type: str = "focus.replyer",
+        request_type: str = "replyer",
     ):
-        self.request_type = request_type
-
-        if model_set_with_weight:
-            # self.express_model_configs = model_configs
-            self.model_set: List[Tuple[TaskConfig, float]] = model_set_with_weight
-        else:
-            # 当未提供配置时,使用默认配置并赋予默认权重
-
-            # model_config_1 = global_config.model.replyer_1.copy()
-            # model_config_2 = global_config.model.replyer_2.copy()
-            prob_first = global_config.chat.replyer_random_probability
-
-            # model_config_1["weight"] = prob_first
-            # model_config_2["weight"] = 1.0 - prob_first
-
-            # self.express_model_configs = [model_config_1, model_config_2]
-            self.model_set = [
-                (model_config.model_task_config.replyer_1, prob_first),
-                (model_config.model_task_config.replyer_2, 1.0 - prob_first),
-            ]
-
-            # if not self.express_model_configs:
-            #     logger.warning("未找到有效的模型配置,回复生成可能会失败。")
-            #     # 提供一个最终的回退,以防止在空列表上调用 random.choice
-            #     fallback_config = global_config.model.replyer_1.copy()
-            #     fallback_config.setdefault("weight", 1.0)
-            #     self.express_model_configs = [fallback_config]
-
+        self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
         self.chat_stream = chat_stream
         self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
-        self.heart_fc_sender = HeartFCSender()
         self.memory_activator = MemoryActivator()
         self.instant_memory = InstantMemory(chat_id=self.chat_stream.stream_id)
 
         self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)
 
-    def _select_weighted_models_config(self) -> Tuple[TaskConfig, float]:
-        """使用加权随机选择来挑选一个模型配置"""
-        configs = self.model_set
-        # 提取权重,如果模型配置中没有'weight'键,则默认为1.0
-        weights = [weight for _, weight in configs]
-
-        return random.choices(population=configs, weights=weights, k=1)[0]
-
     async def generate_reply_with_context(
         self,
         reply_to: str = "",
@@ -179,8 +142,8 @@ class DefaultReplyer:
         enable_tool: bool = True,
         from_plugin: bool = True,
         stream_id: Optional[str] = None,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str]]:
-        # sourcery skip: merge-nested-ifs
         """
         回复器 (Replier): 负责生成回复文本的核心逻辑。
@@ -205,6 +168,7 @@ class DefaultReplyer:
                 extra_info=extra_info,
                 available_actions=available_actions,
                 enable_tool=enable_tool,
+                reply_message=reply_message,
             )
 
             if not prompt:
@@ -302,16 +266,11 @@ class DefaultReplyer:
             traceback.print_exc()
             return False, None, prompt if return_prompt else None
 
-    async def build_relation_info(self, reply_to: str = ""):
+    async def build_relation_info(self, sender: str, target: str):
         if not global_config.relationship.enable_relationship:
             return ""
 
         relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
-        if not reply_to:
-            return ""
-        sender, text = self._parse_reply_target(reply_to)
-        if not sender or not text:
-            return ""
 
         # 获取用户ID
         person_info_manager = get_person_info_manager()
@@ -418,7 +377,7 @@ class DefaultReplyer:
 
         return memory_str
 
-    async def build_tool_info(self, chat_history: str, reply_to: str = "", enable_tool: bool = True) -> str:
+    async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
         """构建工具信息块
 
         Args:
@@ -433,18 +392,11 @@ class DefaultReplyer:
         if not enable_tool:
             return ""
 
-        if not reply_to:
-            return ""
-
-        sender, text = self._parse_reply_target(reply_to)
-
-        if not text:
-            return ""
 
         try:
             # 使用工具执行器获取信息
             tool_results, _, _ = await self.tool_executor.execute_from_chat_message(
-                sender=sender, target_message=text, chat_history=chat_history, return_details=False
+                sender=sender, target_message=target, chat_history=chat_history, return_details=False
             )
 
             if tool_results:
@@ -672,7 +624,8 @@ class DefaultReplyer:
         extra_info: str = "",
         available_actions: Optional[Dict[str, ActionInfo]] = None,
         enable_tool: bool = True,
-    ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
+        reply_message: Optional[Dict[str, Any]] = None,
+    ) -> str:
         """
         构建回复器上下文
@@ -682,7 +635,7 @@ class DefaultReplyer:
             available_actions: 可用动作
             enable_timeout: 是否启用超时处理
             enable_tool: 是否启用工具调用
-
+            reply_message: 回复的原始消息
         Returns:
             str: 构建好的上下文
         """
@@ -698,8 +651,21 @@ class DefaultReplyer:
             mood_prompt = chat_mood.mood_state
         else:
             mood_prompt = ""
-
-        sender, target = self._parse_reply_target(reply_to)
+
+        if reply_to:
+            # 兼容旧的reply_to
+            sender, target = self._parse_reply_target(reply_to)
+        else:
+            # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
+            platform = reply_message.get("chat_info_platform")
+            person_id = get_person_info_manager().get_person_id(
+                platform,  # type: ignore
+                reply_message.get("user_id"),  # type: ignore
+            )
+            person_name = await get_person_info_manager().get_value(person_id, "person_name")
+            sender = person_name
+            target = reply_message.get('processed_plain_text')
+
         person_info_manager = get_person_info_manager()
         person_id = person_info_manager.get_person_id_by_person_name(sender)
         user_id = person_info_manager.get_value_sync(person_id, "user_id")
@@ -744,12 +710,12 @@ class DefaultReplyer:
             self._time_and_run_task(
                 self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
             ),
-            self._time_and_run_task(self.build_relation_info(reply_to), "relation_info"),
+            self._time_and_run_task(self.build_relation_info(sender, target), "relation_info"),
             self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
             self._time_and_run_task(
-                self.build_tool_info(chat_talking_prompt_short, reply_to, enable_tool=enable_tool), "tool_info"
+                self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
             ),
-            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, reply_to), "prompt_info"),
+            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
         )
 
         # 任务名称中英文映射
@@ -899,12 +865,17 @@ class DefaultReplyer:
         raw_reply: str,
         reason: str,
         reply_to: str,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
         chat_stream = self.chat_stream
         chat_id = chat_stream.stream_id
         is_group_chat = bool(chat_stream.group_info)
 
-        sender, target = self._parse_reply_target(reply_to)
+        if reply_message:
+            sender = reply_message.get("sender")
+            target = reply_message.get("target")
+        else:
+            sender, target = self._parse_reply_target(reply_to)
 
         # 添加情绪状态获取
         if global_config.mood.enable_mood:
@@ -930,7 +901,7 @@ class DefaultReplyer:
         # 并行执行2个构建任务
         expression_habits_block, relation_info = await asyncio.gather(
             self.build_expression_habits(chat_talking_prompt_half, target),
-            self.build_relation_info(reply_to),
+            self.build_relation_info(sender, target),
         )
 
         keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
@@ -1035,34 +1006,25 @@ class DefaultReplyer:
 
     async def llm_generate_content(self, prompt: str):
         with Timer("LLM生成", {}):  # 内部计时器,可选保留
-            # 加权随机选择一个模型配置
-            selected_model_config, weight = self._select_weighted_models_config()
-            logger.info(f"使用模型集生成回复: {selected_model_config} (选中概率: {weight})")
-
-            express_model = LLMRequest(model_set=selected_model_config, request_type=self.request_type)
+            # 直接使用已初始化的模型实例
+            logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}")
 
             if global_config.debug.show_prompt:
                 logger.info(f"\n{prompt}\n")
             else:
                 logger.debug(f"\n{prompt}\n")
 
-            content, (reasoning_content, model_name, tool_calls) = await express_model.generate_response_async(prompt)
+            content, (reasoning_content, model_name, tool_calls) = await self.express_model.generate_response_async(prompt)
 
             logger.debug(f"replyer生成内容: {content}")
         return content, reasoning_content, model_name, tool_calls
 
-    async def get_prompt_info(self, message: str, reply_to: str):
+    async def get_prompt_info(self, message: str, sender: str, target: str):
         related_info = ""
         start_time = time.time()
 
         from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool
 
-        if not reply_to:
-            logger.debug("没有回复对象,跳过获取知识库内容")
-            return ""
-        sender, content = self._parse_reply_target(reply_to)
-        if not content:
-            logger.debug("回复对象内容为空,跳过获取知识库内容")
-            return ""
+
         logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
         # 从LPMM知识库获取知识
         try:
@@ -1080,7 +1042,7 @@ class DefaultReplyer:
                 time_now=time_now,
                 chat_history=message,
                 sender=sender,
-                target_message=content,
+                target_message=target,
             )
             _, _, _, _, tool_calls = await llm_api.generate_with_model_with_tools(
                 prompt,
diff --git a/src/chat/replyer/replyer_manager.py b/src/chat/replyer/replyer_manager.py
index bb3a313b7..2613e49a1 100644
--- a/src/chat/replyer/replyer_manager.py
+++ b/src/chat/replyer/replyer_manager.py
@@ -16,7 +16,6 @@ class ReplyerManager:
         self,
         chat_stream: Optional[ChatStream] = None,
         chat_id: Optional[str] = None,
-        model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
         request_type: str = "replyer",
     ) -> Optional[DefaultReplyer]:
         """
@@ -50,7 +49,6 @@ class ReplyerManager:
             # model_configs 只在此时(初始化时)生效
             replyer = DefaultReplyer(
                 chat_stream=target_stream,
-                model_set_with_weight=model_set_with_weight,  # 可以是None,此时使用默认模型
                 request_type=request_type,
             )
             self._repliers[stream_id] = replyer
diff --git a/src/chat/willing/mode_classical.py b/src/chat/willing/mode_classical.py
deleted file mode 100644
index 16d67bb5e..000000000
--- a/src/chat/willing/mode_classical.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import asyncio
-
-from src.config.config import global_config
-from .willing_manager import BaseWillingManager
-
-
-class ClassicalWillingManager(BaseWillingManager):
-    def __init__(self):
-        super().__init__()
-        self._decay_task: asyncio.Task | None = None
-
-    async def _decay_reply_willing(self):
-        """定期衰减回复意愿"""
-        while True:
-            await asyncio.sleep(1)
-            for chat_id in self.chat_reply_willing:
-                self.chat_reply_willing[chat_id] = max(0.0, self.chat_reply_willing[chat_id] * 0.9)
-
-    async def async_task_starter(self):
-        if self._decay_task is None:
-            self._decay_task = asyncio.create_task(self._decay_reply_willing())
-
-    async def get_reply_probability(self, message_id):
-        willing_info = self.ongoing_messages[message_id]
-        chat_id = willing_info.chat_id
-        current_willing = self.chat_reply_willing.get(chat_id, 0)
-
-        # print(f"[{chat_id}] 回复意愿: {current_willing}")
-
-        interested_rate = willing_info.interested_rate
-
-        # print(f"[{chat_id}] 兴趣值: {interested_rate}")
-
-        current_willing += interested_rate
-
-        if willing_info.is_mentioned_bot and global_config.chat.mentioned_bot_inevitable_reply and current_willing < 2:
-            current_willing += 1 if current_willing < 1.0 else 0.2
-
-        self.chat_reply_willing[chat_id] = min(current_willing, 1.0)
-
-        reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1.5)
-
-        # print(f"[{chat_id}] 回复概率: {reply_probability}")
-
-        return reply_probability
-
-    async def before_generate_reply_handle(self, message_id):
-        pass
-
-    async def after_generate_reply_handle(self, message_id):
-        if message_id not in self.ongoing_messages:
-            return
-
-        chat_id = self.ongoing_messages[message_id].chat_id
-        current_willing = self.chat_reply_willing.get(chat_id, 0)
-        if current_willing < 1:
-            self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.3)
-
-    async def not_reply_handle(self, message_id):
-        return await super().not_reply_handle(message_id)
diff --git a/src/chat/willing/mode_custom.py b/src/chat/willing/mode_custom.py
deleted file mode 100644
index 9987ba942..000000000
--- a/src/chat/willing/mode_custom.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from .willing_manager import BaseWillingManager
-
-NOT_IMPLEMENTED_MESSAGE = "\ncustom模式你实现了吗?没自行实现不要选custom。给你退了快点给你麦爹配置\n注:以上内容由gemini生成,如有不满请投诉gemini"
-
-class CustomWillingManager(BaseWillingManager):
-    async def async_task_starter(self) -> None:
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
-
-    async def before_generate_reply_handle(self, message_id: str):
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
-
-    async def after_generate_reply_handle(self, message_id: str):
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
-
-    async def not_reply_handle(self, message_id: str):
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
-
-    async def get_reply_probability(self, message_id: str):
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
-
-    def __init__(self):
-        super().__init__()
-        raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
diff --git a/src/chat/willing/mode_mxp.py b/src/chat/willing/mode_mxp.py
deleted file mode 100644
index a249cb6f1..000000000
--- a/src/chat/willing/mode_mxp.py
+++ /dev/null
@@ -1,296 +0,0 @@
-"""
-Mxp 模式:梦溪畔独家赞助
-此模式的一些参数不会在配置文件中显示,要修改请在可变参数下修改
-同时一些全局设置对此模式无效
-此模式的可变参数暂时比较草率,需要调参仙人的大手
-此模式的特点:
-1.每个聊天流的每个用户的意愿是独立的
-2.接入关系系统,关系会影响意愿值(已移除,因为关系系统重构)
-3.会根据群聊的热度来调整基础意愿值
-4.限制同时思考的消息数量,防止喷射
-5.拥有单聊增益,无论在群里还是私聊,只要bot一直和你聊,就会增加意愿值
-6.意愿分为衰减意愿+临时意愿
-7.疲劳机制
-
-如果你发现本模式出现了bug
-上上策是询问智慧的小草神()
-上策是询问万能的千石可乐
-中策是发issue
-下下策是询问一个菜鸟(@梦溪畔)
-"""
-
-from typing import Dict
-import asyncio
-import time
-import math
-
-from src.chat.message_receive.chat_stream import ChatStream
-from .willing_manager import BaseWillingManager
-
-
-class MxpWillingManager(BaseWillingManager):
-    """Mxp意愿管理器"""
-
-    def __init__(self):
-        super().__init__()
-        self.chat_person_reply_willing: Dict[str, Dict[str, float]] = {}  # chat_id: {person_id: 意愿值}
-        self.chat_new_message_time: Dict[str, list[float]] = {}  # 聊天流ID: 消息时间
-        self.last_response_person: Dict[str, tuple[str, int]] = {}  # 上次回复的用户信息
-        self.temporary_willing: float = 0  # 临时意愿值
-        self.chat_bot_message_time: Dict[str, list[float]] = {}  # 聊天流ID: bot已回复消息时间
-        self.chat_fatigue_punishment_list: Dict[
-            str, list[tuple[float, float]]
-        ] = {}  # 聊天流疲劳惩罚列, 聊天流ID: 惩罚时间列(开始时间,持续时间)
-        self.chat_fatigue_willing_attenuation: Dict[str, float] = {}  # 聊天流疲劳意愿衰减值
-
-        # 可变参数
-        self.intention_decay_rate = 0.93  # 意愿衰减率
-
-        self.number_of_message_storage = 12  # 消息存储数量
-        self.expected_replies_per_min = 3  # 每分钟预期回复数
-        self.basic_maximum_willing = 0.5  # 基础最大意愿值
-
-        self.mention_willing_gain = 0.6  # 提及意愿增益
-        self.interest_willing_gain = 0.3  # 兴趣意愿增益
-        self.single_chat_gain = 0.12  # 单聊增益
-
-        self.fatigue_messages_triggered_num = self.expected_replies_per_min  # 疲劳消息触发数量(int)
-        self.fatigue_coefficient = 1.0  # 疲劳系数
-
-        self.is_debug = False  # 是否开启调试模式
-
-    async def async_task_starter(self) -> None:
-        """异步任务启动器"""
-        asyncio.create_task(self._return_to_basic_willing())
-        asyncio.create_task(self._chat_new_message_to_change_basic_willing())
-        asyncio.create_task(self._fatigue_attenuation())
-
-    async def before_generate_reply_handle(self, message_id: str):
-        """回复前处理"""
-        current_time = time.time()
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            if w_info.chat_id not in self.chat_bot_message_time:
-                self.chat_bot_message_time[w_info.chat_id] = []
-            self.chat_bot_message_time[w_info.chat_id] = [
-                t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60
-            ]
-            self.chat_bot_message_time[w_info.chat_id].append(current_time)
-            if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
-                time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
-                self.chat_fatigue_punishment_list[w_info.chat_id].append((current_time, time_interval * 2))
-
-    async def after_generate_reply_handle(self, message_id: str):
-        """回复后处理"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            # 移除关系值相关代码
-            # rel_value = await w_info.person_info_manager.get_value(w_info.person_id, "relationship_value")
-            # rel_level = self._get_relationship_level_num(rel_value)
-            # self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += rel_level * 0.05
-
-            now_chat_new_person = self.last_response_person.get(w_info.chat_id, (w_info.person_id, 0))
-            if now_chat_new_person[0] == w_info.person_id:
-                if now_chat_new_person[1] < 3:
-                    tmp_list = list(now_chat_new_person)
-                    tmp_list[1] += 1  # type: ignore
-                    self.last_response_person[w_info.chat_id] = tuple(tmp_list)  # type: ignore
-            else:
-                self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
-
-    async def not_reply_handle(self, message_id: str):
-        """不回复处理"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            if w_info.is_mentioned_bot:
-                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.mention_willing_gain / 2.5
-            if (
-                w_info.chat_id in self.last_response_person
-                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
-                and self.last_response_person[w_info.chat_id][1]
-            ):
-                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.single_chat_gain * (
-                    2 * self.last_response_person[w_info.chat_id][1] - 1
-                )
-            now_chat_new_person = self.last_response_person.get(w_info.chat_id, ("", 0))
-            if now_chat_new_person[0] != w_info.person_id:
-                self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
-
-    async def get_reply_probability(self, message_id: str):
-        # sourcery skip: merge-duplicate-blocks, remove-redundant-if
-        """获取回复概率"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            current_willing = self.chat_person_reply_willing[w_info.chat_id][w_info.person_id]
-            if self.is_debug:
-                self.logger.debug(f"基础意愿值:{current_willing}")
-
-            if w_info.is_mentioned_bot:
-                willing_gain = self.mention_willing_gain / (int(current_willing) + 1)
-                current_willing += willing_gain
-                if self.is_debug:
-                    self.logger.debug(f"提及增益:{willing_gain}")
-
-            if w_info.interested_rate > 0:
-                willing_gain = math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
-                current_willing += willing_gain
-                if self.is_debug:
-                    self.logger.debug(f"兴趣增益:{willing_gain}")
-
-            self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing
-
-            # 添加单聊增益
-            if (
-                w_info.chat_id in self.last_response_person
-                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
-                and self.last_response_person[w_info.chat_id][1]
-            ):
-                current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
-                if self.is_debug:
-                    self.logger.debug(
-                        f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}"
-                    )
-
-            current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
-            if self.is_debug:
-                self.logger.debug(f"疲劳衰减:{self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)}")
-
-            chat_ongoing_messages = [msg for msg in self.ongoing_messages.values() if msg.chat_id == w_info.chat_id]
-            chat_person_ongoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
-            if len(chat_person_ongoing_messages) >= 2:
-                current_willing = 0
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:归0")
-            elif len(chat_ongoing_messages) == 2:
-                current_willing -= 0.5
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:-0.5")
-            elif len(chat_ongoing_messages) == 3:
-                current_willing -= 1.5
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:-1.5")
-            elif len(chat_ongoing_messages) >= 4:
-                current_willing = 0
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:归0")
-
-            probability = self._willing_to_probability(current_willing)
-
-            self.temporary_willing = current_willing
-
-            return probability
-
-    async def _return_to_basic_willing(self):
-        """使每个人的意愿恢复到chat基础意愿"""
-        while True:
-            await asyncio.sleep(3)
-            async with self.lock:
-                for chat_id, person_willing in self.chat_person_reply_willing.items():
-                    for person_id, willing in person_willing.items():
-                        if chat_id not in self.chat_reply_willing:
-                            self.logger.debug(f"聊天流{chat_id}不存在,错误")
-                            continue
-                        basic_willing = self.chat_reply_willing[chat_id]
-                        person_willing[person_id] = (
-                            basic_willing + (willing - basic_willing) * self.intention_decay_rate
-                        )
-
-    def setup(self, message: dict, chat_stream: ChatStream):
-        super().setup(message, chat_stream)
-        stream_id = chat_stream.stream_id
-        self.chat_reply_willing[stream_id] = self.chat_reply_willing.get(stream_id, self.basic_maximum_willing)
-        self.chat_person_reply_willing[stream_id] = self.chat_person_reply_willing.get(stream_id, {})
-        self.chat_person_reply_willing[stream_id][self.ongoing_messages[message.get("message_id", "")].person_id] = (
-            self.chat_person_reply_willing[stream_id].get(
-                self.ongoing_messages[message.get("message_id", "")].person_id,
-                self.chat_reply_willing[stream_id],
-            )
-        )
-
-        current_time = time.time()
-        if stream_id not in self.chat_new_message_time:
-            self.chat_new_message_time[stream_id] = []
-        self.chat_new_message_time[stream_id].append(current_time)
-        if len(self.chat_new_message_time[stream_id]) > self.number_of_message_storage:
-            self.chat_new_message_time[stream_id].pop(0)
-
-        if stream_id not in self.chat_fatigue_punishment_list:
-            self.chat_fatigue_punishment_list[stream_id] = [
-                (
-                    current_time,
-                    self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60,
-                )
-            ]
-            self.chat_fatigue_willing_attenuation[stream_id] = (
-                -2 * self.basic_maximum_willing * self.fatigue_coefficient
-            )
-
-    @staticmethod
-    def _willing_to_probability(willing: float) -> float:
-        """意愿值转化为概率"""
-        willing = max(0, willing)
-        if willing < 2:
-            return math.atan(willing * 2) / math.pi * 2
-        elif willing < 2.5:
-            return math.atan(willing * 4) / math.pi * 2
-        else:
-            return 1
-
-    async def _chat_new_message_to_change_basic_willing(self):
-        """聊天流新消息改变基础意愿"""
-        update_time = 20
-        while True:
-            await asyncio.sleep(update_time)
-            async with self.lock:
-                for chat_id, message_times in self.chat_new_message_time.items():
-                    # 清理过期消息
-                    current_time = time.time()
-                    message_times = [
-                        msg_time
-                        for msg_time in message_times
-                        if current_time - msg_time
-                        < self.number_of_message_storage
-                        * self.basic_maximum_willing
-                        / self.expected_replies_per_min
-                        * 60
-                    ]
-                    self.chat_new_message_time[chat_id] = message_times
-
-                    if len(message_times) < self.number_of_message_storage:
-                        self.chat_reply_willing[chat_id] = self.basic_maximum_willing
-                        update_time = 20
-                    elif len(message_times) == self.number_of_message_storage:
-                        time_interval = current_time - message_times[0]
-                        basic_willing = self._basic_willing_calculate(time_interval)
-                        self.chat_reply_willing[chat_id] = basic_willing
-                        update_time = 17 * basic_willing / self.basic_maximum_willing + 3
-                    else:
-                        self.logger.debug(f"聊天流{chat_id}消息时间数量异常,数量:{len(message_times)}")
-                        self.chat_reply_willing[chat_id] = 0
-            if self.is_debug:
-                self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")
-
-    def _basic_willing_calculate(self, t: float) -> float:
-        """基础意愿值计算"""
-        return math.tan(t * self.expected_replies_per_min * math.pi / 120 / self.number_of_message_storage) / 2
-
-    async def _fatigue_attenuation(self):
-        """疲劳衰减"""
-        while True:
-            await asyncio.sleep(1)
-            current_time = time.time()
-            async with self.lock:
-                for chat_id, fatigue_list in self.chat_fatigue_punishment_list.items():
-                    fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
-                    self.chat_fatigue_willing_attenuation[chat_id] = 0
-                    for start_time, duration in fatigue_list:
-                        self.chat_fatigue_willing_attenuation[chat_id] += (
-                            self.chat_reply_willing[chat_id]
-                            * 2
-                            / math.pi
-                            * math.asin(2 * (current_time - start_time) / duration - 1)
-                            - self.chat_reply_willing[chat_id]
-                        ) * self.fatigue_coefficient
-
-    async def get_willing(self, chat_id):
-        return self.temporary_willing
diff --git a/src/chat/willing/willing_manager.py b/src/chat/willing/willing_manager.py
deleted file mode 100644
index 6b946f92c..000000000
--- a/src/chat/willing/willing_manager.py
+++ /dev/null
@@ -1,180 +0,0 @@
-import importlib
-import asyncio
-
-from abc import ABC, abstractmethod
-from typing import Dict, Optional, Any
-from rich.traceback import install
-from dataclasses import dataclass
-
-from src.common.logger import get_logger
-from src.config.config import global_config
-from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
-from src.person_info.person_info import PersonInfoManager, get_person_info_manager
-
-install(extra_lines=3)
-
-"""
-基类方法概览:
-以下8个方法是你必须在子类重写的(哪怕什么都不干):
-async_task_starter 在程序启动时执行,在其中用asyncio.create_task启动你想要执行的异步任务
-before_generate_reply_handle 确定要回复后,在生成回复前的处理
-after_generate_reply_handle 确定要回复后,在生成回复后的处理
-not_reply_handle 确定不回复后的处理
-get_reply_probability 获取回复概率
-get_variable_parameters 暂不确定
-set_variable_parameters 暂不确定
-以下2个方法根据你的实现可以做调整:
-get_willing 获取某聊天流意愿
-set_willing 设置某聊天流意愿
-规范说明:
-模块文件命名: `mode_{manager_type}.py`
-示例: 若 `manager_type="aggressive"`,则模块文件应为 `mode_aggressive.py`
-类命名: `{manager_type}WillingManager` (首字母大写)
-示例: 在 `mode_aggressive.py` 中,类名应为 `AggressiveWillingManager`
-"""
-
-
-logger = get_logger("willing")
-
-
-@dataclass
-class WillingInfo:
-    """此类保存意愿模块常用的参数
-
-    Attributes:
-        message (MessageRecv): 原始消息对象
-        chat (ChatStream): 聊天流对象
-        person_info_manager (PersonInfoManager): 用户信息管理对象
-        chat_id (str): 当前聊天流的标识符
-        person_id (str): 发送者的个人信息的标识符
-        group_id (str): 群组ID(如果是私聊则为空)
-        is_mentioned_bot (bool): 是否提及了bot
-        is_emoji (bool): 是否为表情包
-        interested_rate (float): 兴趣度
-    """
-
-    message: Dict[str, Any]  # 原始消息数据
-    chat: ChatStream
-    person_info_manager: PersonInfoManager
-    chat_id: str
-    person_id: str
-    group_info: Optional[GroupInfo]
-    is_mentioned_bot: bool
-    is_emoji: bool
-    is_picid: bool
-    interested_rate: float
-    # current_mood: float 当前心情?
-
-
-class BaseWillingManager(ABC):
-    """回复意愿管理基类"""
-
-    @classmethod
-    def create(cls, manager_type: str) -> "BaseWillingManager":
-        try:
-            module = importlib.import_module(f".mode_{manager_type}", __package__)
-            manager_class = getattr(module, f"{manager_type.capitalize()}WillingManager")
-            if not issubclass(manager_class, cls):
-                raise TypeError(f"Manager class {manager_class.__name__} is not a subclass of {cls.__name__}")
-            else:
-                logger.info(f"普通回复模式:{manager_type}")
-            return manager_class()
-        except (ImportError, AttributeError, TypeError) as e:
-            module = importlib.import_module(".mode_classical", __package__)
-            manager_class = module.ClassicalWillingManager
-            logger.info(f"载入当前意愿模式{manager_type}失败,使用经典配方~~~~")
-            logger.debug(f"加载willing模式{manager_type}失败,原因: {str(e)}。")
-            return manager_class()
-
-    def __init__(self):
-        self.chat_reply_willing: Dict[str, float] = {}  # 存储每个聊天流的回复意愿(chat_id)
-        self.ongoing_messages: Dict[str, WillingInfo] = {}  # 当前正在进行的消息(message_id)
-        self.lock = asyncio.Lock()
-        self.logger = logger
-
-    def setup(self, message: dict, chat: ChatStream):
-        person_id = PersonInfoManager.get_person_id(chat.platform, chat.user_info.user_id)  # type: ignore
-        self.ongoing_messages[message.get("message_id", "")] = WillingInfo(
-            message=message,
-            chat=chat,
-            person_info_manager=get_person_info_manager(),
-            chat_id=chat.stream_id,
-            person_id=person_id,
-            group_info=chat.group_info,
-            is_mentioned_bot=message.get("is_mentioned", False),
-            is_emoji=message.get("is_emoji", False),
-            is_picid=message.get("is_picid", False),
-            interested_rate = message.get("interest_value") or 0.0,
-        )
-
-    def delete(self, message_id: str):
-        del_message = self.ongoing_messages.pop(message_id, None)
-        if not del_message:
-            logger.debug(f"尝试删除不存在的消息 ID: {message_id},可能已被其他流程处理,喵~")
-
-    @abstractmethod
-    async def async_task_starter(self) -> None:
-        """抽象方法:异步任务启动器"""
-        pass
-
-    @abstractmethod
-    async def before_generate_reply_handle(self, message_id: str):
-        """抽象方法:回复前处理"""
-        pass
-
-    @abstractmethod
-    async def after_generate_reply_handle(self, message_id: str):
-        """抽象方法:回复后处理"""
-        pass
-
-    @abstractmethod
-    async def not_reply_handle(self, message_id: str):
-        """抽象方法:不回复处理"""
-        pass
-
-    @abstractmethod
-    async def get_reply_probability(self, message_id: str):
-        """抽象方法:获取回复概率"""
-        raise NotImplementedError
-
-    async def get_willing(self, chat_id: str):
-        """获取指定聊天流的回复意愿"""
-        async with self.lock:
-            return self.chat_reply_willing.get(chat_id, 0)
-
-    async def set_willing(self, chat_id: str, willing: float):
-        """设置指定聊天流的回复意愿"""
-        async with self.lock:
-            self.chat_reply_willing[chat_id] = willing
-
-    # @abstractmethod
-    # async def get_variable_parameters(self) -> Dict[str, str]:
-    #     """抽象方法:获取可变参数"""
-    #     pass
-
-    # @abstractmethod
-    # async def set_variable_parameters(self, parameters: Dict[str, any]):
-    #     """抽象方法:设置可变参数"""
-    #     pass
-
-
-def init_willing_manager() -> BaseWillingManager:
-    """
-    根据配置初始化并返回对应的WillingManager实例
-
-    Returns:
-        对应mode的WillingManager实例
-    """
-    mode = global_config.normal_chat.willing_mode.lower()
-    return BaseWillingManager.create(mode)
-
-
-# 全局willing_manager对象
-willing_manager = None
-
-
-def get_willing_manager():
-    global willing_manager
-    if willing_manager is None:
-        willing_manager = init_willing_manager()
-    return willing_manager
diff --git a/src/config/api_ada_configs.py b/src/config/api_ada_configs.py
index 0292f7238..bd881bfdf 100644
--- a/src/config/api_ada_configs.py
+++ b/src/config/api_ada_configs.py
@@ -99,12 +99,9 @@ class ModelTaskConfig(ConfigBase):
     utils_small: TaskConfig
     """组件小模型配置"""
 
-    replyer_1: TaskConfig
+    replyer: TaskConfig
     """normal_chat首要回复模型配置"""
 
-    replyer_2: TaskConfig
-    """normal_chat次要回复模型配置"""
-
     emotion: TaskConfig
     """情绪模型配置"""
diff --git a/src/main.py b/src/main.py
index 5e24d9bf9..eea65deba 100644
--- a/src/main.py
+++ b/src/main.py
@@ -6,7 +6,6 @@ from src.common.remote import TelemetryHeartBeatTask
 from src.manager.async_task_manager import async_task_manager
 from src.chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
 from src.chat.emoji_system.emoji_manager import get_emoji_manager
-from src.chat.willing.willing_manager import get_willing_manager
 from src.chat.message_receive.chat_stream import get_chat_manager
 from src.config.config import global_config
 from src.chat.message_receive.bot import chat_bot
@@ -31,8 +30,6 @@ if global_config.memory.enable_memory:
 
 install(extra_lines=3)
 
-willing_manager = get_willing_manager()
-
 logger = get_logger("main")
 
 
@@ -91,11 +88,6 @@ class MainSystem:
             get_emoji_manager().initialize()
             logger.info("表情包管理器初始化成功")
 
-            # 启动愿望管理器
-            await willing_manager.async_task_starter()
-
-            logger.info("willing管理器初始化成功")
-
             # 启动情绪管理器
             await mood_manager.start()
             logger.info("情绪管理器初始化成功")
diff --git a/src/mais4u/mai_think.py b/src/mais4u/mai_think.py
index 5a1f58082..3daa5875d 100644
--- a/src/mais4u/mai_think.py
+++ b/src/mais4u/mai_think.py
@@ -60,7 +60,7 @@ class MaiThinking:
         self.sender = ""
         self.target = ""
 
-        self.thinking_model = LLMRequest(model_set=model_config.model_task_config.replyer_1, request_type="thinking")
+        self.thinking_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type="thinking")
 
     async def do_think_before_response(self):
         pass
diff --git a/src/mais4u/mais4u_chat/s4u_stream_generator.py b/src/mais4u/mais4u_chat/s4u_stream_generator.py
index 43bf3599b..da12d9f9d 100644
--- a/src/mais4u/mais4u_chat/s4u_stream_generator.py
+++ b/src/mais4u/mais4u_chat/s4u_stream_generator.py
@@ -13,8 +13,8 @@ logger = get_logger("s4u_stream_generator")
 
 class S4UStreamGenerator:
     def __init__(self):
-        replyer_1_config = model_config.model_task_config.replyer_1
-        model_to_use = replyer_1_config.model_list[0]
+        replyer_config = model_config.model_task_config.replyer
+        model_to_use = replyer_config.model_list[0]
         model_info = model_config.get_model_info(model_to_use)
         if not model_info:
             logger.error(f"模型 {model_to_use} 在配置中未找到")
@@ -22,8 +22,8 @@ class S4UStreamGenerator:
         provider_name = model_info.api_provider
         provider_info = model_config.get_provider(provider_name)
         if not provider_info:
-            logger.error("`replyer_1` 找不到对应的Provider")
-            raise ValueError("`replyer_1` 找不到对应的Provider")
+            logger.error("`replyer` 找不到对应的Provider")
+            raise ValueError("`replyer` 找不到对应的Provider")
 
         api_key = provider_info.api_key
         base_url = provider_info.base_url
@@ -34,7 +34,7 @@ class S4UStreamGenerator:
 
         self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url)
         self.model_1_name = model_to_use
-        self.replyer_1_config = replyer_1_config
+        self.replyer_config = replyer_config
 
         self.current_model_name = "unknown model"
         self.partial_response = ""
@@ -104,10 +104,10 @@ class S4UStreamGenerator:
             self.current_model_name = self.model_1_name
 
         extra_kwargs = {}
-        if self.replyer_1_config.get("enable_thinking") is not None:
-            extra_kwargs["enable_thinking"] = self.replyer_1_config.get("enable_thinking")
-        if self.replyer_1_config.get("thinking_budget") is not None:
-            extra_kwargs["thinking_budget"] = self.replyer_1_config.get("thinking_budget")
+        if self.replyer_config.get("enable_thinking") is not None:
+            extra_kwargs["enable_thinking"] = self.replyer_config.get("enable_thinking")
+        if self.replyer_config.get("thinking_budget") is not None:
+            extra_kwargs["thinking_budget"] = self.replyer_config.get("thinking_budget")
 
         async for chunk in self._generate_response_with_model(
             prompt, current_client, self.current_model_name, **extra_kwargs
diff --git a/src/plugin_system/apis/generator_api.py b/src/plugin_system/apis/generator_api.py
index e9bf23bff..51da1b025 100644
--- a/src/plugin_system/apis/generator_api.py
+++ b/src/plugin_system/apis/generator_api.py
@@ -32,7 +32,6 @@ logger = get_logger("generator_api")
 def get_replyer(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "replyer",
 ) -> Optional[DefaultReplyer]:
     """获取回复器对象
@@ -43,7 +42,6 @@ def get_replyer(
     Args:
         chat_stream: 聊天流对象(优先)
         chat_id: 聊天ID(实际上就是stream_id)
-        model_set_with_weight: 模型配置列表,每个元素为 (TaskConfig, weight) 元组
         request_type: 请求类型
 
     Returns:
@@ -59,7 +57,6 @@ def get_replyer(
         return replyer_manager.get_replyer(
             chat_stream=chat_stream,
             chat_id=chat_id,
-            model_set_with_weight=model_set_with_weight,
             request_type=request_type,
         )
     except Exception as e:
@@ -78,13 +75,13 @@ async def generate_reply(
     chat_id: Optional[str] = None,
     action_data: Optional[Dict[str, Any]] = None,
     reply_to: str = "",
+    reply_message: Optional[Dict[str, Any]] = None,
     extra_info: str = "",
     available_actions: Optional[Dict[str, ActionInfo]] = None,
     enable_tool: bool = False,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
     return_prompt: bool = False,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "generator_api",
     from_plugin: bool = True,
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
@@ -95,6 +92,7 @@ async def generate_reply(
         chat_id: 聊天ID(备用)
         action_data: 动作数据(向下兼容,包含reply_to和extra_info)
         reply_to: 回复对象,格式为 "发送者:消息内容"
+        reply_message: 回复的原始消息
         extra_info: 额外信息,用于补充上下文
         available_actions: 可用动作
         enable_tool: 是否启用工具调用
@@ -110,7 +108,7 @@ async def generate_reply(
     try:
         # 获取回复器
         replyer = get_replyer(
-            chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type
+            chat_stream, chat_id, request_type=request_type
         )
         if not replyer:
             logger.error("[GeneratorAPI] 无法获取回复器")
@@ -131,6 +129,7 @@ async def generate_reply(
             enable_tool=enable_tool,
             from_plugin=from_plugin,
             stream_id=chat_stream.stream_id if chat_stream else chat_id,
+            reply_message=reply_message,
         )
         if not success:
             logger.warning("[GeneratorAPI] 回复生成失败")
@@ -166,11 +165,11 @@ async def rewrite_reply(
     chat_id: Optional[str] = None,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     raw_reply: str = "",
     reason: str = "",
     reply_to: str = "",
     return_prompt: bool = False,
+    request_type: str = "generator_api",
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
     """重写回复
 
@@ -191,7 +190,7 @@ async def rewrite_reply(
     """
     try:
         # 获取回复器
-        replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight)
+        replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
         if not replyer:
             logger.error("[GeneratorAPI] 无法获取回复器")
             return False, [], None
@@ -258,10 +257,10 @@ def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo:
 async def generate_response_custom(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
+    request_type: str = "generator_api",
     prompt: str = "",
 ) -> Optional[str]:
-    replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight)
+    replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
     if not replyer:
         logger.error("[GeneratorAPI] 无法获取回复器")
         return None
diff --git a/src/plugin_system/apis/send_api.py b/src/plugin_system/apis/send_api.py
index 10fbd804e..449e132f0 100644
--- a/src/plugin_system/apis/send_api.py
+++ b/src/plugin_system/apis/send_api.py
@@ -22,7 +22,7 @@ import traceback
 import time
 import difflib
 
-from typing import Optional, Union
+from typing import Optional, Union, Dict, Any
 from src.common.logger import get_logger
 
 # 导入依赖
@@ -49,7 +49,8 @@ async def _send_to_target(
     display_message: str = "",
     typing: bool = False,
     reply_to: str = "",
-    reply_to_platform_id: Optional[str] = None,
+    set_reply: bool = False,
+    reply_to_message: Optional[Dict[str, Any]] = None,
     storage_message: bool = True,
     show_log: bool = True,
 ) -> bool:
@@ -62,7 +63,6 @@ async def _send_to_target(
         display_message: 显示消息
         typing: 是否模拟打字等待。
         reply_to: 回复消息,格式为"发送者:消息内容"
-        reply_to_platform_id: 回复消息,格式为"平台:用户ID",如果不提供则自动查找(插件开发者禁用!)
         storage_message: 是否存储消息到数据库
         show_log: 发送是否显示日志
 
@@ -70,6 +70,9 @@ async def _send_to_target(
         bool: 是否发送成功
     """
     try:
+        if reply_to:
+            logger.warning("[SendAPI] 在0.10.0, reply_to 参数已弃用,请使用 reply_to_message 参数")
+
         if show_log:
             logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}")
 
@@ -96,14 +99,14 @@ async def _send_to_target(
 
         # 创建消息段
         message_segment = Seg(type=message_type, data=content)  # type: ignore
 
-        # 处理回复消息
-        anchor_message = None
-        if reply_to:
-            anchor_message = await _find_reply_message(target_stream, reply_to)
-            if anchor_message and anchor_message.message_info.user_info and not reply_to_platform_id:
-                reply_to_platform_id = (
-                    f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
-                )
+        if reply_to_message:
+            anchor_message = MessageRecv(message_dict=reply_to_message)
+            anchor_message.update_chat_stream(target_stream)
+            reply_to_platform_id = (
+                f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
+            )
+        else:
+            anchor_message = reply_to_platform_id = None
 
         # 构建发送消息对象
         bot_message = MessageSending(
@@ -124,7 +127,7 @@ async def _send_to_target(
         sent_msg = await heart_fc_sender.send_message(
             bot_message,
             typing=typing,
-            set_reply=(anchor_message is not None),
+            set_reply=set_reply,
             storage_message=storage_message,
             show_log=show_log,
         )
@@ -259,7 +262,8 @@ async def text_to_stream(
     stream_id: str,
     typing: bool = False,
     reply_to: str = "",
-    reply_to_platform_id: str = "",
+    reply_to_message: Optional[Dict[str, Any]] = None,
+    set_reply: bool = False,
     storage_message: bool = True,
 ) -> bool:
     """向指定流发送文本消息
 
@@ -269,7 +273,6 @@ async def text_to_stream(
         stream_id: 聊天流ID
         typing: 是否显示正在输入
         reply_to: 回复消息,格式为"发送者:消息内容"
-        reply_to_platform_id: 回复消息,格式为"平台:用户ID",如果不提供则自动查找(插件开发者禁用!)
         storage_message: 是否存储消息到数据库
 
     Returns:
@@ -282,12 +285,13 @@ async def text_to_stream(
         bool: 是否发送成功
     """
     return await _send_to_target(
         "text",
         text,
         stream_id,
         "",
         typing,
         reply_to,
-        reply_to_platform_id=reply_to_platform_id,
+        set_reply=set_reply,
+        reply_to_message=reply_to_message,
         storage_message=storage_message,
     )
 
 
-async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True) -> bool:
+async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
     """向指定流发送表情包
 
     Args:
@@ -298,10 +302,10 @@ async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bo
     Returns:
         bool: 是否发送成功
     """
-    return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message)
+    return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)
 
 
-async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True) -> bool:
+async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
     """向指定流发送图片
 
     Args:
@@ -312,11 +316,11 @@ async def image_to_stream(image_base64: str, stream_id: str, storage_message: bo
     Returns:
         bool: 是否发送成功
     """
-    return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message)
+    return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)
 
 
 async def command_to_stream(
-    command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = ""
+    command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = "", set_reply: bool = False
 ) -> bool:
     """向指定流发送命令
 
@@ -329,7 +333,7 @@ async def command_to_stream(
         bool: 是否发送成功
     """
     return await _send_to_target(
-        "command", command, stream_id, display_message, typing=False, storage_message=storage_message
+        "command", command, stream_id, display_message, typing=False, storage_message=storage_message, set_reply=set_reply
     )
 
 
@@ -340,6 +344,8 @@ async def custom_to_stream(
     display_message: str = "",
     typing: bool = False,
     reply_to: str = "",
+    reply_to_message: Optional[Dict[str, Any]] = None,
+    set_reply: bool = False,
     storage_message: bool = True,
     show_log: bool = True,
 ) -> bool:
@@ -364,6 +370,8 @@ async def custom_to_stream(
         display_message=display_message,
         typing=typing,
         reply_to=reply_to,
+        reply_to_message=reply_to_message,
+        set_reply=set_reply,
         storage_message=storage_message,
         show_log=show_log,
     )
diff --git a/template/model_config_template.toml b/template/model_config_template.toml
index 77993954a..92ac8881f 100644
--- a/template/model_config_template.toml
+++ b/template/model_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "1.2.0"
+version = "1.3.0"
 
 # 配置文件版本号迭代规则同bot_config.toml
 
@@ -112,16 +112,11 @@ model_list = ["qwen3-8b"]
 temperature = 0.7
 max_tokens = 800
 
-[model_task_config.replyer_1] # 首要回复模型,还用于表达器和表达方式学习
+[model_task_config.replyer] # 首要回复模型,还用于表达器和表达方式学习
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.2 # 模型温度,新V3建议0.1-0.3
 max_tokens = 800
 
-[model_task_config.replyer_2] # 次要回复模型
-model_list = ["siliconflow-deepseek-v3"]
-temperature = 0.7
-max_tokens = 800
-
 [model_task_config.planner] #决策:负责决定麦麦该做什么的模型
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.3