From d0ad70924d3f53c1e3fd612090cf267aeff5f2dc Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Sun, 6 Jul 2025 23:49:12 +0800
Subject: [PATCH] feat: optionally enable prompt display,
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/chat/normal_chat/normal_chat.py   | 13 +++++++------
 src/chat/planner_actions/planner.py   |  9 +++++----
 src/chat/replyer/default_generator.py | 10 +++++++---
 src/config/config.py                  |  3 ++-
 src/config/official_configs.py        | 13 ++++++++++---
 src/mais4u/mais4u_chat/s4u_chat.py    |  2 --
 template/bot_config_template.toml     |  8 ++++++--
 7 files changed, 37 insertions(+), 21 deletions(-)

diff --git a/src/chat/normal_chat/normal_chat.py b/src/chat/normal_chat/normal_chat.py
index e69e2a562..569584eb5 100644
--- a/src/chat/normal_chat/normal_chat.py
+++ b/src/chat/normal_chat/normal_chat.py
@@ -224,7 +224,7 @@ class NormalChat:
         mark_head = False
         first_bot_msg = None
         for msg in response_set:
-            if global_config.experimental.debug_show_chat_mode:
+            if global_config.debug.debug_show_chat_mode:
                 msg += "ⁿ"
             message_segment = Seg(type="text", data=msg)
             bot_message = MessageSending(
@@ -434,11 +434,12 @@ class NormalChat:
         # current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))  # uses self.stream_id
         # willing_log = f"[activation:{await willing_manager.get_willing(self.stream_id):.2f}]" if is_willing else ""

-        logger.info(
-            f"[{mes_name}]"
-            f"{message.message_info.user_info.user_nickname}:"  # uses self.chat_stream
-            f"{message.processed_plain_text}[interest:{interested_rate:.2f}][reply probability:{reply_probability * 100:.1f}%]"
-        )
+        if reply_probability > 0.1:
+            logger.info(
+                f"[{mes_name}]"
+                f"{message.message_info.user_info.user_nickname}:"  # uses self.chat_stream
+                f"{message.processed_plain_text}[interest:{interested_rate:.2f}][reply probability:{reply_probability * 100:.1f}%]"
+            )
         do_reply = False
         response_set = None  # initialize response_set
         if random() < reply_probability:
diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py
index 8dd4ecdc3..135ea6bac 100644
--- a/src/chat/planner_actions/planner.py
+++ b/src/chat/planner_actions/planner.py
@@ -119,10 +119,11 @@ class ActionPlanner:
         try:
             llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt)

-            logger.info(f"{self.log_prefix}planner raw prompt: {prompt}")
-            logger.info(f"{self.log_prefix}planner raw response: {llm_content}")
-            if reasoning_content:
-                logger.info(f"{self.log_prefix}planner reasoning: {reasoning_content}")
+            if global_config.debug.show_prompt:
+                logger.info(f"{self.log_prefix}planner raw prompt: {prompt}")
+                logger.info(f"{self.log_prefix}planner raw response: {llm_content}")
+                if reasoning_content:
+                    logger.info(f"{self.log_prefix}planner reasoning: {reasoning_content}")

         except Exception as req_e:
             logger.error(f"{self.log_prefix}LLM request failed: {req_e}")
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index 51f62ba96..d9a7feda0 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -217,7 +217,9 @@ class DefaultReplyer:
             request_type=self.request_type,
         )

-        logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
+        if global_config.debug.show_prompt:
+            logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
+
         content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)

         logger.info(f"final reply: {content}")
@@ -560,7 +562,9 @@ class DefaultReplyer:
         for name, result, duration in task_results:
             results_dict[name] = result
             timing_logs.append(f"{name}: {duration:.4f}s")
-        logger.info(f"pre-reply info fetch time: {'; '.join(timing_logs)}")
+            if duration > 8:
+                logger.warning(f"pre-reply info fetch too slow: {name} took {duration:.4f}s; please use a faster model")
+        logger.info(f"pre-reply info fetch timing: {'; '.join(timing_logs)}")

         expression_habits_block = results_dict["build_expression_habits"]
         relation_info = results_dict["build_relation_info"]
@@ -850,7 +854,7 @@ class DefaultReplyer:
             type = msg_text[0]
             data = msg_text[1]

-            if global_config.experimental.debug_show_chat_mode and type == "text":
+            if global_config.debug.debug_show_chat_mode and type == "text":
                 data += "ᶠ"

             part_message_id = f"{thinking_id}_{i}"
diff --git a/src/config/config.py b/src/config/config.py
index 641353809..ee6e2dbc6 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -35,6 +35,7 @@ from src.config.official_configs import (
     LPMMKnowledgeConfig,
     RelationshipConfig,
     ToolConfig,
+    DebugConfig,
 )

 install(extra_lines=3)
@@ -165,7 +166,7 @@ class Config(ConfigBase):
     maim_message: MaimMessageConfig
     lpmm_knowledge: LPMMKnowledgeConfig
     tool: ToolConfig
-
+    debug: DebugConfig

 def load_config(config_path: str) -> Config:
     """
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 335b95c77..c1c4bab48 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -529,14 +529,21 @@ class TelemetryConfig(ConfigBase):
     enable: bool = True
     """Whether to enable telemetry"""


+@dataclass
+class DebugConfig(ConfigBase):
+    """Debug configuration"""
+
+    debug_show_chat_mode: bool = False
+    """Whether to show the current chat mode after replies"""
+
+    show_prompt: bool = False
+    """Whether to show the prompt"""
+
 @dataclass
 class ExperimentalConfig(ConfigBase):
     """Experimental feature configuration"""

-    debug_show_chat_mode: bool = False
-    """Whether to show the current chat mode after replies"""
-
     enable_friend_chat: bool = False
     """Whether to enable friend chat"""

diff --git a/src/mais4u/mais4u_chat/s4u_chat.py b/src/mais4u/mais4u_chat/s4u_chat.py
index 28c19ab74..825135f62 100644
--- a/src/mais4u/mais4u_chat/s4u_chat.py
+++ b/src/mais4u/mais4u_chat/s4u_chat.py
@@ -77,8 +77,6 @@ class MessageSenderContainer:
             msg_id = f"{current_time}_{random.randint(1000, 9999)}"
             text_to_send = chunk

-            if global_config.experimental.debug_show_chat_mode:
-                text_to_send += "ⁿ"

             message_segment = Seg(type="text", data=text_to_send)
             bot_message = MessageSending(
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index b8781cea9..50b28d16c 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "3.5.0"
+version = "3.6.0"

 #---- The following is for developers; if you have only deployed 麦麦, you do not need to read it ----
 # If you want to modify the config file, bump the version value after your changes
@@ -231,6 +231,11 @@
 library_log_levels = { "aiohttp" = "WARNING"} # set log levels for specific libraries

 # enable_thinking = : whether the model enables thinking
 # thinking_budget = : maximum length of the model's thinking
+[debug]
+show_prompt = false # whether to show the prompt
+debug_show_chat_mode = false # whether to show the current chat mode after replies
+
+
 [model]
 model_max_output_length = 1000 # maximum number of tokens per model response
@@ -366,7 +371,6 @@
 key_file = "" # SSL key file path; only effective when use_wss=true

 enable = true

 [experimental] # experimental features
-debug_show_chat_mode = false # whether to show the current chat mode after replies
 enable_friend_chat = false # whether to enable friend chat
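
Reviewer note, not part of the patch: a minimal standalone sketch of how the new [debug] table is meant to drive the DebugConfig flags added above. The tomllib parsing and the load_debug_config helper are illustrative assumptions, not this project's loader (which builds a Config from the TOML template via ConfigBase):

# Sketch only: re-implements the [debug] -> DebugConfig idea with the stdlib.
import logging
import tomllib  # stdlib TOML parser, Python 3.11+
from dataclasses import dataclass

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("debug-sketch")

SAMPLE_CONFIG = """
[debug]
show_prompt = true                # log prompts and raw LLM responses
debug_show_chat_mode = false      # append a chat-mode marker to replies
"""

@dataclass
class DebugConfig:
    show_prompt: bool = False
    debug_show_chat_mode: bool = False

def load_debug_config(toml_text: str) -> DebugConfig:
    # A missing [debug] table falls back to the all-off defaults.
    return DebugConfig(**tomllib.loads(toml_text).get("debug", {}))

debug = load_debug_config(SAMPLE_CONFIG)

prompt = "<planner prompt here>"
if debug.show_prompt:  # the same gate the patch adds in planner.py and default_generator.py
    logger.info("planner raw prompt: %s", prompt)

reply = "hello"
if debug.debug_show_chat_mode:  # marker appending as in normal_chat.py
    reply += "ⁿ"

Defaulting both flags to False matches the patch: prompt logging becomes opt-in instead of always-on.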
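
Reviewer note, not part of the patch: a runnable sketch of the slow-task warning added in default_generator.py, where each pre-reply info task is timed and anything over the patch's 8-second threshold logs a warning. The task names and sleep-based coroutines here are placeholders, not the project's real tasks:

# Sketch only: timing concurrent info-gathering tasks and warning on slow ones.
import asyncio
import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("timing-sketch")

SLOW_THRESHOLD_S = 8.0  # mirrors the patch's `duration > 8`

async def timed(name: str, coro) -> tuple[str, object, float]:
    # Wrap a coroutine so it reports its own wall-clock duration.
    start = time.monotonic()
    result = await coro
    return name, result, time.monotonic() - start

async def main() -> None:
    task_results = await asyncio.gather(
        timed("build_expression_habits", asyncio.sleep(0.05, result="habits")),
        timed("build_relation_info", asyncio.sleep(0.10, result="relations")),
    )
    results_dict, timing_logs = {}, []
    for name, result, duration in task_results:
        results_dict[name] = result
        timing_logs.append(f"{name}: {duration:.4f}s")
        if duration > SLOW_THRESHOLD_S:
            logger.warning("pre-reply info task %s took %.4fs; please use a faster model", name, duration)
    logger.info("pre-reply info fetch timing: %s", "; ".join(timing_logs))

asyncio.run(main())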