feat: make prompt display optional

SengokuCola
2025-07-06 23:49:12 +08:00
parent 61c55d8b3b
commit d0ad70924d
7 changed files with 37 additions and 21 deletions

View File

@@ -224,7 +224,7 @@ class NormalChat:
             mark_head = False
             first_bot_msg = None
             for msg in response_set:
-                if global_config.experimental.debug_show_chat_mode:
+                if global_config.debug.debug_show_chat_mode:
                     msg += ""
                 message_segment = Seg(type="text", data=msg)
                 bot_message = MessageSending(
bot_message = MessageSending(
@@ -434,11 +434,12 @@ class NormalChat:
         # current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
         # uses self.stream_id
         # willing_log = f"[activation: {await willing_manager.get_willing(self.stream_id):.2f}]" if is_willing else ""
-        logger.info(
-            f"[{mes_name}]"
-            f"{message.message_info.user_info.user_nickname}:"  # uses self.chat_stream
-            f"{message.processed_plain_text}[interest: {interested_rate:.2f}][reply probability: {reply_probability * 100:.1f}%]"
-        )
+        if reply_probability > 0.1:
+            logger.info(
+                f"[{mes_name}]"
+                f"{message.message_info.user_info.user_nickname}:"  # uses self.chat_stream
+                f"{message.processed_plain_text}[interest: {interested_rate:.2f}][reply probability: {reply_probability * 100:.1f}%]"
+            )
         do_reply = False
         response_set = None  # initialize response_set
         if random() < reply_probability:
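The log line is now emitted only for messages with a realistic chance of being answered, which keeps low-probability chatter out of the info log. A minimal sketch of the resulting two-stage gate (a fixed threshold for logging, a coin flip for replying); the value and print below are stand-ins for the real willingness score and logger:

from random import random

reply_probability = 0.35  # stand-in for the computed score

# Stage 1: log only candidates above the 10% threshold this commit introduces.
if reply_probability > 0.1:
    print(f"candidate message [reply probability: {reply_probability * 100:.1f}%]")

# Stage 2: actually reply with that probability.
if random() < reply_probability:
    print("generating reply...")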

View File

@@ -119,10 +119,11 @@ class ActionPlanner:
         try:
             llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt)
-            logger.info(f"{self.log_prefix}planner raw prompt: {prompt}")
-            logger.info(f"{self.log_prefix}planner raw response: {llm_content}")
-            if reasoning_content:
-                logger.info(f"{self.log_prefix}planner reasoning: {reasoning_content}")
+            if global_config.debug.show_prompt:
+                logger.info(f"{self.log_prefix}planner raw prompt: {prompt}")
+                logger.info(f"{self.log_prefix}planner raw response: {llm_content}")
+                if reasoning_content:
+                    logger.info(f"{self.log_prefix}planner reasoning: {reasoning_content}")
         except Exception as req_e:
             logger.error(f"{self.log_prefix}LLM request failed: {req_e}")
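Prompt and reasoning dumps can be very large, so they are now gated behind a single config switch. A self-contained sketch of the gating pattern, assuming simplified stand-ins for the project's config objects (the DebugConfig and GlobalConfig classes below are illustrative, not the real ConfigBase subclasses):

import logging
from dataclasses import dataclass, field

logger = logging.getLogger(__name__)

@dataclass
class DebugConfig:
    show_prompt: bool = False  # off by default, matching the new [debug] section

@dataclass
class GlobalConfig:
    debug: DebugConfig = field(default_factory=DebugConfig)

global_config = GlobalConfig()

def log_planner_io(prefix: str, prompt: str, response: str, reasoning: str = "") -> None:
    # Emit the potentially huge prompt/response pair only when enabled.
    if global_config.debug.show_prompt:
        logger.info(f"{prefix}planner raw prompt: {prompt}")
        logger.info(f"{prefix}planner raw response: {response}")
        if reasoning:
            logger.info(f"{prefix}planner reasoning: {reasoning}")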

View File

@@ -217,7 +217,9 @@ class DefaultReplyer:
             request_type=self.request_type,
         )
-        logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
+        if global_config.debug.show_prompt:
+            logger.info(f"{self.log_prefix}Prompt:\n{prompt}\n")
         content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)
         logger.info(f"Final reply: {content}")
@@ -560,7 +562,9 @@ class DefaultReplyer:
         for name, result, duration in task_results:
             results_dict[name] = result
             timing_logs.append(f"{name}: {duration:.4f}s")
-        logger.info(f"Pre-reply info gathering time: {'; '.join(timing_logs)}")
+            if duration > 8:
+                logger.warning(f"Pre-reply info gathering took too long: {name} took {duration:.4f}s; consider using a faster model")
+        logger.info(f"Pre-reply info gathering time: {'; '.join(timing_logs)}")
         expression_habits_block = results_dict["build_expression_habits"]
         relation_info = results_dict["build_relation_info"]
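The hunk above treats task_results as a list of (name, result, duration) tuples. A sketch of how such tuples might be produced with asyncio; the run_timed helper and task coroutines are hypothetical, and only the 8-second threshold comes from the diff:

import asyncio
import time

async def run_timed(name, coro):
    # Time each info-gathering coroutine so slow ones can be flagged.
    start = time.monotonic()
    result = await coro
    return name, result, time.monotonic() - start

async def gather_info():
    task_results = await asyncio.gather(
        run_timed("build_expression_habits", asyncio.sleep(0.1, result="habits")),
        run_timed("build_relation_info", asyncio.sleep(0.2, result="relations")),
    )
    for name, _result, duration in task_results:
        if duration > 8:  # same threshold as the warning in the diff
            print(f"{name} took {duration:.4f}s; consider a faster model")
    return {name: result for name, result, _ in task_results}

print(asyncio.run(gather_info()))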
@@ -850,7 +854,7 @@ class DefaultReplyer:
                 type = msg_text[0]
                 data = msg_text[1]
-                if global_config.experimental.debug_show_chat_mode and type == "text":
+                if global_config.debug.debug_show_chat_mode and type == "text":
                     data += ""
                 part_message_id = f"{thinking_id}_{i}"

View File

@@ -35,6 +35,7 @@ from src.config.official_configs import (
LPMMKnowledgeConfig,
RelationshipConfig,
ToolConfig,
DebugConfig,
)
install(extra_lines=3)
@@ -165,7 +166,7 @@ class Config(ConfigBase):
     maim_message: MaimMessageConfig
     lpmm_knowledge: LPMMKnowledgeConfig
     tool: ToolConfig
+    debug: DebugConfig


 def load_config(config_path: str) -> Config:
     """

View File

@@ -529,14 +529,21 @@ class TelemetryConfig(ConfigBase):
     enable: bool = True
     """Whether to enable telemetry"""


+@dataclass
+class DebugConfig(ConfigBase):
+    """Debug configuration"""
+
+    debug_show_chat_mode: bool = False
+    """Whether to show the current chat mode after a reply"""
+
+    show_prompt: bool = False
+    """Whether to log the prompt"""
+
+
 @dataclass
 class ExperimentalConfig(ConfigBase):
     """Experimental feature configuration"""

-    debug_show_chat_mode: bool = False
-    """Whether to show the current chat mode after a reply"""
-
     enable_friend_chat: bool = False
     """Whether to enable friend chat"""

View File

@@ -77,8 +77,6 @@ class MessageSenderContainer:
             msg_id = f"{current_time}_{random.randint(1000, 9999)}"
             text_to_send = chunk
-            if global_config.experimental.debug_show_chat_mode:
-                text_to_send += ""
             message_segment = Seg(type="text", data=text_to_send)
             bot_message = MessageSending(

View File

@@ -1,5 +1,5 @@
 [inner]
-version = "3.5.0"
+version = "3.6.0"

 #---- The following is for developers; if you have only deployed 麦麦, you don't need to read it ----
 # If you modify this config file, bump the version value after making your changes
@@ -231,6 +231,11 @@ library_log_levels = { "aiohttp" = "WARNING"} # set log levels for specific libraries
 # enable_thinking = <true|false> : whether the model enables thinking
 # thinking_budget = <int> : maximum length of the model's thinking

+[debug]
+show_prompt = false # whether to log the prompt
+debug_show_chat_mode = false # whether to show the current chat mode after a reply
+
 [model]
 model_max_output_length = 1000 # maximum number of tokens the model returns per call
@@ -366,7 +371,6 @@ key_file = "" # SSL key file path; only effective when use_wss=true
 enable = true

 [experimental] # experimental features
-debug_show_chat_mode = false # whether to show the current chat mode after a reply
 enable_friend_chat = false # whether to enable friend chat
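End to end, enabling the feature is a single TOML edit under the new [debug] table. A standalone sketch of how that table parses; the project presumably routes this through its own load_config, and tomllib here is only for illustration (Python 3.11+, with the tomli backport on older versions):

import tomllib  # Python 3.11+; `pip install tomli` and `import tomli as tomllib` otherwise

raw = tomllib.loads("""
[debug]
show_prompt = true
debug_show_chat_mode = false
""")

debug = raw.get("debug", {})  # the table is absent in configs written before 3.6.0
print(debug.get("show_prompt", False), debug.get("debug_show_chat_mode", False))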