Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
@@ -20,6 +20,7 @@
 - **流程优化**: 拆分了子心流的思考模块,使整体对话流程更加清晰。
 - **状态判断改进**: 将 CHAT 状态判断交给 LLM 处理,使对话更自然。
 - **回复机制**: 实现更为灵活的概率回复机制,使机器人能够自然地融入群聊环境。
+- **重复性检查**: 加入心流回复重复性检查机制,防止麦麦陷入固定回复模式。
 
 #### 全新知识库系统 (New Knowledge Base System - LPMM)
 - **引入 LPMM**: 新增了 **LPMM (Large Psychology Model Maker)** 知识库系统,具有强大的信息检索能力,能显著提升麦麦获取和利用知识的效率。
@@ -32,8 +33,11 @@
 
 #### 记忆与上下文增强 (Memory and Context Enhancement)
 - **聊天记录压缩**: 大幅优化聊天记录压缩系统,使机器人能够处理5倍于之前的上下文记忆量。
+- **长消息截断**: 新增了长消息自动截断与模糊化功能,随着时间推移降低超长消息的权重,避免被特定冗余信息干扰。
 - **记忆提取**: 优化记忆提取功能,提高对历史对话的理解和引用能力。
+- **记忆整合**: 为记忆系统加入了合并与整合机制,优化长期记忆的结构与效率。
 - **中期记忆调用**: 完善中期记忆调用机制,使机器人能够更自然地回忆和引用较早前的对话。
+- **Prompt 优化**: 进一步优化了关系系统和记忆系统相关的提示词(prompt)。
 
 #### 私聊 PFC 功能增强 (Private Chat PFC Enhancement)
 - **功能修复与优化**: 修复了私聊 PFC 载入聊天记录缺失的 bug,优化了 prompt 构建,增加了审核机制,调整了重试次数,并将机器人发言存入数据库。
@@ -41,9 +45,9 @@
 
 #### 情感与互动增强 (Emotion and Interaction Enhancement)
 - **全新表情包系统**: 新的表情包系统上线,表情含义更丰富,发送更快速。
+- **表情包使用优化**: 优化了表情包的选择逻辑,减少重复使用特定表情包的情况,使表达更生动。
 - **提示词优化**: 优化提示词(prompt)构建,增强对话质量和情感表达。
 - **积极性配置**: 优化"让麦麦更愿意说话"的相关配置,使机器人更积极参与对话。
-- **命名统一**: 实现统一命名功能,自动替换 prompt 内唯一标识符,优化 prompt 效果。
 - **颜文字保护**: 保护颜文字处理机制,确保表情正确显示。
 
 #### 工具与集成 (Tools and Integration)
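The "回复机制" entry above describes a more flexible, probabilistic reply trigger; the heartflow config later in this diff exposes `reply_trigger_threshold`, `probability_decay_factor_per_second` and `default_decay_rate_per_second` for it. The following is a rough, non-authoritative sketch of how a decaying-interest trigger of that kind can work; the formula and the exact meaning of the parameters are assumptions, not MaiBot's actual implementation.

```python
# Illustrative sketch only: a decaying-interest reply trigger in the spirit of the
# "probabilistic reply" changelog entry. Parameter names mirror the heartflow config
# shown later in this diff, but their semantics here are assumptions.
import random
import time


class InterestDecaySketch:
    def __init__(self, reply_trigger_threshold: float = 3.0,
                 default_decay_rate_per_second: float = 0.98):
        self.interest = 0.0
        self.reply_trigger_threshold = reply_trigger_threshold
        self.decay_rate = default_decay_rate_per_second
        self._last_update = time.monotonic()

    def on_message(self, weight: float = 1.0) -> bool:
        """Decay accumulated interest, add the new message's weight,
        then decide probabilistically whether to reply."""
        now = time.monotonic()
        elapsed = now - self._last_update
        self._last_update = now
        self.interest *= self.decay_rate ** elapsed   # older interest fades per second
        self.interest += weight
        # The closer interest gets to the trigger threshold, the likelier a reply.
        probability = min(1.0, self.interest / self.reply_trigger_threshold)
        return random.random() < probability


bot = InterestDecaySketch()
print(any(bot.on_message() for _ in range(5)))  # a burst of messages makes a reply likely
```

Lowering `reply_trigger_threshold` makes replies easier to trigger, which matches the comment on that field in the config diff below.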
@@ -19,6 +19,7 @@ REFRESH_INTERVAL_MS = 200 # 刷新间隔 (毫秒) - 可以适当调长,因为
 WINDOW_TITLE = "Interest Monitor (Live History)"
 MAX_HISTORY_POINTS = 1000 # 图表上显示的最大历史点数 (可以增加)
 MAX_STREAMS_TO_DISPLAY = 15 # 最多显示多少个聊天流的折线图 (可以增加)
+MAX_QUEUE_SIZE = 30 # 新增:历史想法队列最大长度
 
 # *** 添加 Matplotlib 中文字体配置 ***
 # 尝试使用 'SimHei' 或 'Microsoft YaHei',如果找不到,matplotlib 会回退到默认字体
@@ -61,6 +62,10 @@ class InterestMonitorApp:
|
|||||||
self.single_stream_last_active = tk.StringVar(value="活跃: N/A")
|
self.single_stream_last_active = tk.StringVar(value="活跃: N/A")
|
||||||
self.single_stream_last_interaction = tk.StringVar(value="交互: N/A")
|
self.single_stream_last_interaction = tk.StringVar(value="交互: N/A")
|
||||||
|
|
||||||
|
# 新增:历史想法队列
|
||||||
|
self.main_mind_history = deque(maxlen=MAX_QUEUE_SIZE)
|
||||||
|
self.last_main_mind_timestamp = 0 # 记录最后一条main_mind的时间戳
|
||||||
|
|
||||||
# --- UI 元素 ---
|
# --- UI 元素 ---
|
||||||
|
|
||||||
# --- 新增:顶部全局信息框架 ---
|
# --- 新增:顶部全局信息框架 ---
|
||||||
@@ -143,6 +148,24 @@ class InterestMonitorApp:
|
|||||||
self.canvas_widget_single = self.canvas_single.get_tk_widget()
|
self.canvas_widget_single = self.canvas_single.get_tk_widget()
|
||||||
self.canvas_widget_single.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
|
self.canvas_widget_single.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
|
||||||
|
|
||||||
|
# --- 新增第三个选项卡:麦麦历史想法 ---
|
||||||
|
self.frame_mind_history = ttk.Frame(self.notebook, padding="5 5 5 5")
|
||||||
|
self.notebook.add(self.frame_mind_history, text="麦麦历史想法")
|
||||||
|
|
||||||
|
# 聊天框样式的文本框(只读)+ 滚动条
|
||||||
|
self.mind_text_scroll = tk.Scrollbar(self.frame_mind_history)
|
||||||
|
self.mind_text_scroll.pack(side=tk.RIGHT, fill=tk.Y)
|
||||||
|
self.mind_text = tk.Text(
|
||||||
|
self.frame_mind_history,
|
||||||
|
height=25,
|
||||||
|
state="disabled",
|
||||||
|
wrap="word",
|
||||||
|
font=("微软雅黑", 12),
|
||||||
|
yscrollcommand=self.mind_text_scroll.set,
|
||||||
|
)
|
||||||
|
self.mind_text.pack(side=tk.LEFT, fill=tk.BOTH, expand=1, padx=5, pady=5)
|
||||||
|
self.mind_text_scroll.config(command=self.mind_text.yview)
|
||||||
|
|
||||||
# --- 初始化和启动刷新 ---
|
# --- 初始化和启动刷新 ---
|
||||||
self.update_display() # 首次加载并开始刷新循环
|
self.update_display() # 首次加载并开始刷新循环
|
||||||
|
|
||||||
@@ -154,6 +177,78 @@ class InterestMonitorApp:
|
|||||||
"""生成随机颜色用于区分线条"""
|
"""生成随机颜色用于区分线条"""
|
||||||
return "#{:06x}".format(random.randint(0, 0xFFFFFF))
|
return "#{:06x}".format(random.randint(0, 0xFFFFFF))
|
||||||
|
|
||||||
|
def load_main_mind_history(self):
|
||||||
|
"""只读取包含main_mind的日志行,维护历史想法队列"""
|
||||||
|
if not os.path.exists(LOG_FILE_PATH):
|
||||||
|
return
|
||||||
|
|
||||||
|
main_mind_entries = []
|
||||||
|
try:
|
||||||
|
with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
|
||||||
|
for line in f:
|
||||||
|
try:
|
||||||
|
log_entry = json.loads(line.strip())
|
||||||
|
if "main_mind" in log_entry:
|
||||||
|
ts = log_entry.get("timestamp", 0)
|
||||||
|
main_mind_entries.append((ts, log_entry))
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
main_mind_entries.sort(key=lambda x: x[0])
|
||||||
|
recent_entries = main_mind_entries[-MAX_QUEUE_SIZE:]
|
||||||
|
self.main_mind_history.clear()
|
||||||
|
for _ts, entry in recent_entries:
|
||||||
|
self.main_mind_history.append(entry)
|
||||||
|
if recent_entries:
|
||||||
|
self.last_main_mind_timestamp = recent_entries[-1][0]
|
||||||
|
# 首次加载时刷新
|
||||||
|
self.refresh_mind_text()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def update_main_mind_history(self):
|
||||||
|
"""实时监控log文件,发现新main_mind数据则更新队列和展示(仅有新数据时刷新)"""
|
||||||
|
if not os.path.exists(LOG_FILE_PATH):
|
||||||
|
return
|
||||||
|
|
||||||
|
new_entries = []
|
||||||
|
try:
|
||||||
|
with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
|
||||||
|
for line in reversed(list(f)):
|
||||||
|
try:
|
||||||
|
log_entry = json.loads(line.strip())
|
||||||
|
if "main_mind" in log_entry:
|
||||||
|
ts = log_entry.get("timestamp", 0)
|
||||||
|
if ts > self.last_main_mind_timestamp:
|
||||||
|
new_entries.append((ts, log_entry))
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
if new_entries:
|
||||||
|
for ts, entry in sorted(new_entries):
|
||||||
|
if len(self.main_mind_history) >= MAX_QUEUE_SIZE:
|
||||||
|
self.main_mind_history.popleft()
|
||||||
|
self.main_mind_history.append(entry)
|
||||||
|
self.last_main_mind_timestamp = ts
|
||||||
|
self.refresh_mind_text() # 只有有新数据时才刷新
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def refresh_mind_text(self):
|
||||||
|
"""刷新聊天框样式的历史想法展示"""
|
||||||
|
self.mind_text.config(state="normal")
|
||||||
|
self.mind_text.delete(1.0, tk.END)
|
||||||
|
for entry in self.main_mind_history:
|
||||||
|
ts = entry.get("timestamp", 0)
|
||||||
|
dt_str = datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S") if ts else ""
|
||||||
|
main_mind = entry.get("main_mind", "")
|
||||||
|
mai_state = entry.get("mai_state", "")
|
||||||
|
subflow_count = entry.get("subflow_count", "")
|
||||||
|
msg = f"[{dt_str}] 状态:{mai_state} 子流:{subflow_count}\n{main_mind}\n\n"
|
||||||
|
self.mind_text.insert(tk.END, msg)
|
||||||
|
self.mind_text.see(tk.END)
|
||||||
|
self.mind_text.config(state="disabled")
|
||||||
|
|
||||||
def load_and_update_history(self):
|
def load_and_update_history(self):
|
||||||
"""从 history log 文件加载数据并更新历史记录"""
|
"""从 history log 文件加载数据并更新历史记录"""
|
||||||
if not os.path.exists(LOG_FILE_PATH):
|
if not os.path.exists(LOG_FILE_PATH):
|
||||||
@@ -537,8 +632,14 @@ class InterestMonitorApp:
|
|||||||
def update_display(self):
|
def update_display(self):
|
||||||
"""主更新循环"""
|
"""主更新循环"""
|
||||||
try:
|
try:
|
||||||
self.load_and_update_history() # 从文件加载数据并更新内部状态
|
# --- 新增:首次加载历史想法 ---
|
||||||
|
if not hasattr(self, "_main_mind_loaded"):
|
||||||
|
self.load_main_mind_history()
|
||||||
|
self._main_mind_loaded = True
|
||||||
|
else:
|
||||||
|
self.update_main_mind_history() # 只有有新main_mind数据时才刷新界面
|
||||||
# *** 修改:分别调用两个图表的更新方法 ***
|
# *** 修改:分别调用两个图表的更新方法 ***
|
||||||
|
self.load_and_update_history() # 从文件加载数据并更新内部状态
|
||||||
self.update_all_streams_plot() # 更新所有流的图表
|
self.update_all_streams_plot() # 更新所有流的图表
|
||||||
self.update_single_stream_plot() # 更新单个流的图表
|
self.update_single_stream_plot() # 更新单个流的图表
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
|||||||
@@ -793,6 +793,22 @@ LPMM_GET_KNOWLEDGE_TOOL_STYLE_CONFIG = {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
INIT_STYLE_CONFIG = {
|
||||||
|
"advanced": {
|
||||||
|
"console_format": (
|
||||||
|
"<white>{time:YYYY-MM-DD HH:mm:ss}</white> | "
|
||||||
|
"<level>{level: <8}</level> | "
|
||||||
|
"<light-yellow>初始化</light-yellow> | "
|
||||||
|
"<level>{message}</level>"
|
||||||
|
),
|
||||||
|
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}",
|
||||||
|
},
|
||||||
|
"simple": {
|
||||||
|
"console_format": "<level>{time:MM-DD HH:mm}</level> | <light-green>初始化</light-green> | {message}",
|
||||||
|
"file_format": "{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {extra[module]: <15} | 初始化 | {message}",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
# 根据SIMPLE_OUTPUT选择配置
|
# 根据SIMPLE_OUTPUT选择配置
|
||||||
MAIN_STYLE_CONFIG = MAIN_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MAIN_STYLE_CONFIG["advanced"]
|
MAIN_STYLE_CONFIG = MAIN_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else MAIN_STYLE_CONFIG["advanced"]
|
||||||
@@ -862,6 +878,7 @@ CHAT_MESSAGE_STYLE_CONFIG = (
|
|||||||
CHAT_MESSAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_MESSAGE_STYLE_CONFIG["advanced"]
|
CHAT_MESSAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_MESSAGE_STYLE_CONFIG["advanced"]
|
||||||
)
|
)
|
||||||
CHAT_IMAGE_STYLE_CONFIG = CHAT_IMAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_IMAGE_STYLE_CONFIG["advanced"]
|
CHAT_IMAGE_STYLE_CONFIG = CHAT_IMAGE_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else CHAT_IMAGE_STYLE_CONFIG["advanced"]
|
||||||
|
INIT_STYLE_CONFIG = INIT_STYLE_CONFIG["simple"] if SIMPLE_OUTPUT else INIT_STYLE_CONFIG["advanced"]
|
||||||
|
|
||||||
|
|
||||||
def is_registered_module(record: dict) -> bool:
|
def is_registered_module(record: dict) -> bool:
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ from src.common.logger import (
|
|||||||
MESSAGE_BUFFER_STYLE_CONFIG,
|
MESSAGE_BUFFER_STYLE_CONFIG,
|
||||||
CHAT_MESSAGE_STYLE_CONFIG,
|
CHAT_MESSAGE_STYLE_CONFIG,
|
||||||
CHAT_IMAGE_STYLE_CONFIG,
|
CHAT_IMAGE_STYLE_CONFIG,
|
||||||
|
INIT_STYLE_CONFIG,
|
||||||
)
|
)
|
||||||
|
|
||||||
# 可根据实际需要补充更多模块配置
|
# 可根据实际需要补充更多模块配置
|
||||||
@@ -84,6 +85,7 @@ MODULE_LOGGER_CONFIGS = {
|
|||||||
"message_buffer": MESSAGE_BUFFER_STYLE_CONFIG, # 消息缓冲
|
"message_buffer": MESSAGE_BUFFER_STYLE_CONFIG, # 消息缓冲
|
||||||
"chat_message": CHAT_MESSAGE_STYLE_CONFIG, # 聊天消息
|
"chat_message": CHAT_MESSAGE_STYLE_CONFIG, # 聊天消息
|
||||||
"chat_image": CHAT_IMAGE_STYLE_CONFIG, # 聊天图片
|
"chat_image": CHAT_IMAGE_STYLE_CONFIG, # 聊天图片
|
||||||
|
"init": INIT_STYLE_CONFIG, # 初始化
|
||||||
# ...如有更多模块,继续添加...
|
# ...如有更多模块,继续添加...
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -182,10 +182,10 @@ class BotConfig:
 
 # [heartflow] # 启用heart_flowC(心流聊天)模式时生效, 需要填写token消耗量巨大的相关模型
 # 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间), 进行长时间高质量的聊天
-enable_heart_flowC: bool = True # 是否启用heart_flowC(心流聊天, HFC)模式
 reply_trigger_threshold: float = 3.0 # 心流聊天触发阈值,越低越容易触发
 probability_decay_factor_per_second: float = 0.2 # 概率衰减因子,越大衰减越快
 default_decay_rate_per_second: float = 0.98 # 默认衰减率,越大衰减越慢
+allow_focus_mode: bool = True # 是否允许子心流进入 FOCUSED 状态
 
 # sub_heart_flow_update_interval: int = 60 # 子心流更新频率,间隔 单位秒
 # sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
@@ -418,10 +418,6 @@ class BotConfig:
 "model_normal_probability", config.model_normal_probability
 )
 
-# 添加 enable_heart_flowC 的加载逻辑 (假设它在 [response] 部分)
-if config.INNER_VERSION in SpecifierSet(">=1.4.0"):
-config.enable_heart_flowC = response_config.get("enable_heart_flowC", config.enable_heart_flowC)
-
 def heartflow(parent: dict):
 heartflow_config = parent["heartflow"]
 config.sub_heart_flow_stop_time = heartflow_config.get(
@@ -445,6 +441,8 @@ class BotConfig:
 config.default_decay_rate_per_second = heartflow_config.get(
 "default_decay_rate_per_second", config.default_decay_rate_per_second
 )
+if config.INNER_VERSION in SpecifierSet(">=1.5.1"):
+config.allow_focus_mode = heartflow_config.get("allow_focus_mode", config.allow_focus_mode)
 
 def willing(parent: dict):
 willing_config = parent["willing"]
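The loader above gates the new `allow_focus_mode` field on `config.INNER_VERSION in SpecifierSet(">=1.5.1")`. `SpecifierSet` comes from the `packaging` library (its import is not shown in this excerpt). A minimal standalone example of the same gating pattern, with a made-up version string and config dict:

```python
# Minimal example of the version-gated config pattern used above.
# The inner_version string and the heartflow dict are illustration values only.
from packaging.specifiers import SpecifierSet

inner_version = "1.5.1"                          # assumed: version declared by the config file
heartflow_section = {"allow_focus_mode": False}  # assumed: parsed [heartflow] table
allow_focus_mode = True                          # default used by older config versions

# Only configs new enough to know the field get to override the default.
if inner_version in SpecifierSet(">=1.5.1"):
    allow_focus_mode = heartflow_section.get("allow_focus_mode", allow_focus_mode)

print(allow_focus_mode)  # False: 1.5.1 satisfies ">=1.5.1", so the config value is read
```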
@@ -48,9 +48,11 @@ class GetMemoryTool(BaseTool):
 memory_info += memory[1] + "\n"
 
 if memory_info:
-content = f"你记得这些事情: {memory_info}"
+content = f"你记得这些事情: {memory_info}\n"
+content += "以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
+
 else:
-content = f"你不太记得有关{topic}的记忆,你对此不太了解"
+content = f"{topic}的记忆,你记不太清"
 
 return {"name": "get_memory", "content": content}
 except Exception as e:
@@ -8,8 +8,22 @@ from src.plugins.moods.moods import MoodManager
 logger = get_logger("mai_state")
 
 
-# enable_unlimited_hfc_chat = True
-enable_unlimited_hfc_chat = False
+# -- 状态相关的可配置参数 (可以从 global_config 加载) --
+enable_unlimited_hfc_chat = True # 调试用:无限专注聊天
+# enable_unlimited_hfc_chat = False
+prevent_offline_state = True # 调试用:防止进入离线状态
+
+# 不同状态下普通聊天的最大消息数
+MAX_NORMAL_CHAT_NUM_PEEKING = 30
+MAX_NORMAL_CHAT_NUM_NORMAL = 40
+MAX_NORMAL_CHAT_NUM_FOCUSED = 30
+
+# 不同状态下专注聊天的最大消息数
+MAX_FOCUSED_CHAT_NUM_PEEKING = 20
+MAX_FOCUSED_CHAT_NUM_NORMAL = 30
+MAX_FOCUSED_CHAT_NUM_FOCUSED = 40
+
+# -- 状态定义 --
 
 
 class MaiState(enum.Enum):
@@ -34,11 +48,11 @@ class MaiState(enum.Enum):
 if self == MaiState.OFFLINE:
 return 0
 elif self == MaiState.PEEKING:
-return 30
+return MAX_NORMAL_CHAT_NUM_PEEKING
 elif self == MaiState.NORMAL_CHAT:
-return 40
+return MAX_NORMAL_CHAT_NUM_NORMAL
 elif self == MaiState.FOCUSED_CHAT:
-return 30
+return MAX_NORMAL_CHAT_NUM_FOCUSED
 
 def get_focused_chat_max_num(self):
 # 调试用
@@ -48,11 +62,11 @@ class MaiState(enum.Enum):
 if self == MaiState.OFFLINE:
 return 0
 elif self == MaiState.PEEKING:
-return 20
+return MAX_FOCUSED_CHAT_NUM_PEEKING
 elif self == MaiState.NORMAL_CHAT:
-return 30
+return MAX_FOCUSED_CHAT_NUM_NORMAL
 elif self == MaiState.FOCUSED_CHAT:
-return 40
+return MAX_FOCUSED_CHAT_NUM_FOCUSED
 
 
 class MaiStateInfo:
@@ -110,7 +124,6 @@ class MaiStateManager:
|
|||||||
"""管理 Mai 的整体状态转换逻辑"""
|
"""管理 Mai 的整体状态转换逻辑"""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
# MaiStateManager doesn't hold the state itself, it operates on a MaiStateInfo instance.
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def check_and_decide_next_state(self, current_state_info: MaiStateInfo) -> Optional[MaiState]:
|
def check_and_decide_next_state(self, current_state_info: MaiStateInfo) -> Optional[MaiState]:
|
||||||
@@ -129,6 +142,13 @@ class MaiStateManager:
|
|||||||
time_since_last_min_check = current_time - current_state_info.last_min_check_time
|
time_since_last_min_check = current_time - current_state_info.last_min_check_time
|
||||||
next_state: Optional[MaiState] = None
|
next_state: Optional[MaiState] = None
|
||||||
|
|
||||||
|
# 辅助函数:根据 prevent_offline_state 标志调整目标状态
|
||||||
|
def _resolve_offline(candidate_state: MaiState) -> MaiState:
|
||||||
|
if prevent_offline_state and candidate_state == MaiState.OFFLINE:
|
||||||
|
logger.debug("阻止进入 OFFLINE,改为 PEEKING")
|
||||||
|
return MaiState.PEEKING
|
||||||
|
return candidate_state
|
||||||
|
|
||||||
if current_status == MaiState.OFFLINE:
|
if current_status == MaiState.OFFLINE:
|
||||||
logger.info("当前[离线],没看手机,思考要不要上线看看......")
|
logger.info("当前[离线],没看手机,思考要不要上线看看......")
|
||||||
elif current_status == MaiState.PEEKING:
|
elif current_status == MaiState.PEEKING:
|
||||||
@@ -141,61 +161,73 @@ class MaiStateManager:
|
|||||||
# 1. 麦麦每分钟都有概率离线
|
# 1. 麦麦每分钟都有概率离线
|
||||||
if time_since_last_min_check >= 60:
|
if time_since_last_min_check >= 60:
|
||||||
if current_status != MaiState.OFFLINE:
|
if current_status != MaiState.OFFLINE:
|
||||||
if random.random() < 0.03: # 3% 概率切换到 OFFLINE,20分钟有50%的概率还在线
|
if random.random() < 0.03: # 3% 概率切换到 OFFLINE
|
||||||
logger.debug(f"突然不想聊了,从 {current_status.value} 切换到 离线")
|
potential_next = MaiState.OFFLINE
|
||||||
next_state = MaiState.OFFLINE
|
resolved_next = _resolve_offline(potential_next)
|
||||||
|
logger.debug(f"规则1:概率触发下线,resolve 为 {resolved_next.value}")
|
||||||
|
# 只有当解析后的状态与当前状态不同时才设置 next_state
|
||||||
|
if resolved_next != current_status:
|
||||||
|
next_state = resolved_next
|
||||||
|
|
||||||
# 2. 状态持续时间规则 (如果没有自行下线)
|
# 2. 状态持续时间规则 (只有在规则1没有触发状态改变时才检查)
|
||||||
if next_state is None:
|
if next_state is None:
|
||||||
|
time_limit_exceeded = False
|
||||||
|
choices_list = []
|
||||||
|
weights = []
|
||||||
|
rule_id = ""
|
||||||
|
|
||||||
if current_status == MaiState.OFFLINE:
|
if current_status == MaiState.OFFLINE:
|
||||||
# OFFLINE 最多保持一分钟
|
# 注意:即使 prevent_offline_state=True,也可能从初始的 OFFLINE 状态启动
|
||||||
# 目前是一个调试值,可以修改
|
|
||||||
if time_in_current_status >= 60:
|
if time_in_current_status >= 60:
|
||||||
|
time_limit_exceeded = True
|
||||||
|
rule_id = "2.1 (From OFFLINE)"
|
||||||
weights = [30, 30, 20, 20]
|
weights = [30, 30, 20, 20]
|
||||||
choices_list = [MaiState.PEEKING, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT, MaiState.OFFLINE]
|
choices_list = [MaiState.PEEKING, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT, MaiState.OFFLINE]
|
||||||
next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0]
|
|
||||||
if next_state_candidate != MaiState.OFFLINE:
|
|
||||||
next_state = next_state_candidate
|
|
||||||
logger.debug(f"上线!开始 {next_state.value}")
|
|
||||||
else:
|
|
||||||
# 继续离线状态
|
|
||||||
next_state = MaiState.OFFLINE
|
|
||||||
|
|
||||||
elif current_status == MaiState.PEEKING:
|
elif current_status == MaiState.PEEKING:
|
||||||
if time_in_current_status >= 600: # PEEKING 最多持续 600 秒
|
if time_in_current_status >= 600: # PEEKING 最多持续 600 秒
|
||||||
|
time_limit_exceeded = True
|
||||||
|
rule_id = "2.2 (From PEEKING)"
|
||||||
weights = [70, 20, 10]
|
weights = [70, 20, 10]
|
||||||
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT]
|
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT, MaiState.FOCUSED_CHAT]
|
||||||
next_state = random.choices(choices_list, weights=weights, k=1)[0]
|
|
||||||
logger.debug(f"手机看完了,接下来 {next_state.value}")
|
|
||||||
|
|
||||||
elif current_status == MaiState.NORMAL_CHAT:
|
elif current_status == MaiState.NORMAL_CHAT:
|
||||||
if time_in_current_status >= 300: # NORMAL_CHAT 最多持续 300 秒
|
if time_in_current_status >= 300: # NORMAL_CHAT 最多持续 300 秒
|
||||||
|
time_limit_exceeded = True
|
||||||
|
rule_id = "2.3 (From NORMAL_CHAT)"
|
||||||
weights = [50, 50]
|
weights = [50, 50]
|
||||||
choices_list = [MaiState.OFFLINE, MaiState.FOCUSED_CHAT]
|
choices_list = [MaiState.OFFLINE, MaiState.FOCUSED_CHAT]
|
||||||
next_state = random.choices(choices_list, weights=weights, k=1)[0]
|
|
||||||
if next_state == MaiState.FOCUSED_CHAT:
|
|
||||||
logger.debug(f"继续深入聊天, {next_state.value}")
|
|
||||||
else:
|
|
||||||
logger.debug(f"聊完了,接下来 {next_state.value}")
|
|
||||||
|
|
||||||
elif current_status == MaiState.FOCUSED_CHAT:
|
elif current_status == MaiState.FOCUSED_CHAT:
|
||||||
if time_in_current_status >= 600: # FOCUSED_CHAT 最多持续 600 秒
|
if time_in_current_status >= 600: # FOCUSED_CHAT 最多持续 600 秒
|
||||||
|
time_limit_exceeded = True
|
||||||
|
rule_id = "2.4 (From FOCUSED_CHAT)"
|
||||||
weights = [80, 20]
|
weights = [80, 20]
|
||||||
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT]
|
choices_list = [MaiState.OFFLINE, MaiState.NORMAL_CHAT]
|
||||||
next_state = random.choices(choices_list, weights=weights, k=1)[0]
|
|
||||||
logger.debug(f"深入聊天结束,接下来 {next_state.value}")
|
|
||||||
|
|
||||||
|
if time_limit_exceeded:
|
||||||
|
next_state_candidate = random.choices(choices_list, weights=weights, k=1)[0]
|
||||||
|
resolved_candidate = _resolve_offline(next_state_candidate)
|
||||||
|
logger.debug(
|
||||||
|
f"规则{rule_id}:时间到,随机选择 {next_state_candidate.value},resolve 为 {resolved_candidate.value}"
|
||||||
|
)
|
||||||
|
next_state = resolved_candidate # 直接使用解析后的状态
|
||||||
|
|
||||||
|
# 注意:enable_unlimited_hfc_chat 优先级高于 prevent_offline_state
|
||||||
|
# 如果触发了这个,它会覆盖上面规则2设置的 next_state
|
||||||
if enable_unlimited_hfc_chat:
|
if enable_unlimited_hfc_chat:
|
||||||
logger.debug("调试用:开挂了,强制切换到专注聊天")
|
logger.debug("调试用:开挂了,强制切换到专注聊天")
|
||||||
next_state = MaiState.FOCUSED_CHAT
|
next_state = MaiState.FOCUSED_CHAT
|
||||||
|
|
||||||
|
# --- 最终决策 --- #
|
||||||
# 如果决定了下一个状态,且这个状态与当前状态不同,则返回下一个状态
|
# 如果决定了下一个状态,且这个状态与当前状态不同,则返回下一个状态
|
||||||
if next_state is not None and next_state != current_status:
|
if next_state is not None and next_state != current_status:
|
||||||
return next_state
|
return next_state
|
||||||
# 如果决定保持 OFFLINE (next_state == MaiState.OFFLINE) 且当前也是 OFFLINE,
|
# 如果决定保持 OFFLINE (next_state == MaiState.OFFLINE) 且当前也是 OFFLINE,
|
||||||
# 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器
|
# 并且是由于持续时间规则触发的,返回 OFFLINE 以便调用者可以重置计时器。
|
||||||
|
# 注意:这个分支只有在 prevent_offline_state = False 时才可能被触发。
|
||||||
elif next_state == MaiState.OFFLINE and current_status == MaiState.OFFLINE and time_in_current_status >= 60:
|
elif next_state == MaiState.OFFLINE and current_status == MaiState.OFFLINE and time_in_current_status >= 60:
|
||||||
logger.debug("决定保持 OFFLINE (持续时间规则),返回 OFFLINE 以提示重置计时器。")
|
logger.debug("决定保持 OFFLINE (持续时间规则),返回 OFFLINE 以提示重置计时器。")
|
||||||
return MaiState.OFFLINE # Return OFFLINE to signal caller that timer reset might be needed
|
return MaiState.OFFLINE # Return OFFLINE to signal caller that timer reset might be needed
|
||||||
else:
|
else:
|
||||||
|
# 1. next_state is None (没有触发任何转换规则)
|
||||||
|
# 2. next_state is not None 但等于 current_status (例如规则1想切OFFLINE但被resolve成PEEKING,而当前已经是PEEKING)
|
||||||
|
# 3. next_state is OFFLINE, current is OFFLINE, 但不是因为时间规则触发 (例如初始状态还没到60秒)
|
||||||
return None # 没有状态转换发生或无需重置计时器
|
return None # 没有状态转换发生或无需重置计时器
|
||||||
|
|||||||
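The rule-1 branch above drops the old inline note that a 3% per-minute offline roll leaves roughly a 50% chance of still being online after 20 minutes. A quick check of that arithmetic:

```python
# Sanity check of the "3% chance per minute" offline roll in rule 1:
# the probability of still being online after n minutes is 0.97 ** n.
p_offline_per_minute = 0.03
for minutes in (10, 20, 30):
    p_still_online = (1 - p_offline_per_minute) ** minutes
    print(f"after {minutes:2d} min: still online with p = {p_still_online:.2f}")
# after 10 min: still online with p = 0.74
# after 20 min: still online with p = 0.54   <- the ~50%-after-20-minutes from the old comment
# after 30 min: still online with p = 0.40
```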
@@ -10,6 +10,7 @@ from src.plugins.utils.chat_message_builder import (
|
|||||||
build_readable_messages,
|
build_readable_messages,
|
||||||
get_raw_msg_by_timestamp_with_chat,
|
get_raw_msg_by_timestamp_with_chat,
|
||||||
num_new_messages_since,
|
num_new_messages_since,
|
||||||
|
get_person_id_list,
|
||||||
)
|
)
|
||||||
|
|
||||||
logger = get_logger("observation")
|
logger = get_logger("observation")
|
||||||
@@ -46,6 +47,8 @@ class ChattingObservation(Observation):
|
|||||||
self.max_mid_memory_len = global_config.compress_length_limit
|
self.max_mid_memory_len = global_config.compress_length_limit
|
||||||
self.mid_memory_info = ""
|
self.mid_memory_info = ""
|
||||||
|
|
||||||
|
self.person_list = []
|
||||||
|
|
||||||
self.llm_summary = LLMRequest(
|
self.llm_summary = LLMRequest(
|
||||||
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
|
model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
|
||||||
)
|
)
|
||||||
@@ -153,6 +156,10 @@ class ChattingObservation(Observation):
|
|||||||
truncate=True,
|
truncate=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
self.person_list = await get_person_id_list(self.talking_message)
|
||||||
|
|
||||||
|
# print(f"self.11111person_list: {self.person_list}")
|
||||||
|
|
||||||
logger.trace(
|
logger.trace(
|
||||||
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
|
f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -12,6 +12,8 @@ from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
|
|||||||
from src.heart_flow.chat_state_info import ChatStateInfo
|
from src.heart_flow.chat_state_info import ChatStateInfo
|
||||||
from src.plugins.chat.chat_stream import chat_manager
|
from src.plugins.chat.chat_stream import chat_manager
|
||||||
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
|
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
|
||||||
|
import difflib
|
||||||
|
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||||
|
|
||||||
|
|
||||||
logger = get_logger("sub_heartflow")
|
logger = get_logger("sub_heartflow")
|
||||||
@@ -20,6 +22,7 @@ logger = get_logger("sub_heartflow")
|
|||||||
def init_prompt():
|
def init_prompt():
|
||||||
prompt = ""
|
prompt = ""
|
||||||
prompt += "{extra_info}\n"
|
prompt += "{extra_info}\n"
|
||||||
|
prompt += "{relation_prompt}\n"
|
||||||
prompt += "你的名字是{bot_name},{prompt_personality}\n"
|
prompt += "你的名字是{bot_name},{prompt_personality}\n"
|
||||||
prompt += "{last_loop_prompt}\n"
|
prompt += "{last_loop_prompt}\n"
|
||||||
prompt += "{cycle_info_block}\n"
|
prompt += "{cycle_info_block}\n"
|
||||||
@@ -47,6 +50,40 @@ def init_prompt():
|
|||||||
Prompt(prompt, "last_loop")
|
Prompt(prompt, "last_loop")
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_similarity(text_a: str, text_b: str) -> float:
|
||||||
|
"""
|
||||||
|
计算两个文本字符串的相似度。
|
||||||
|
"""
|
||||||
|
if not text_a or not text_b:
|
||||||
|
return 0.0
|
||||||
|
matcher = difflib.SequenceMatcher(None, text_a, text_b)
|
||||||
|
return matcher.ratio()
|
||||||
|
|
||||||
|
|
||||||
|
def calculate_replacement_probability(similarity: float) -> float:
|
||||||
|
"""
|
||||||
|
根据相似度计算替换的概率。
|
||||||
|
规则:
|
||||||
|
- 相似度 <= 0.4: 概率 = 0
|
||||||
|
- 相似度 >= 0.9: 概率 = 1
|
||||||
|
- 相似度 == 0.6: 概率 = 0.7
|
||||||
|
- 0.4 < 相似度 <= 0.6: 线性插值 (0.4, 0) 到 (0.6, 0.7)
|
||||||
|
- 0.6 < 相似度 < 0.9: 线性插值 (0.6, 0.7) 到 (0.9, 1.0)
|
||||||
|
"""
|
||||||
|
if similarity <= 0.4:
|
||||||
|
return 0.0
|
||||||
|
elif similarity >= 0.9:
|
||||||
|
return 1.0
|
||||||
|
elif 0.4 < similarity <= 0.6:
|
||||||
|
# p = 3.5 * s - 1.4
|
||||||
|
probability = 3.5 * similarity - 1.4
|
||||||
|
return max(0.0, probability)
|
||||||
|
elif 0.6 < similarity < 0.9:
|
||||||
|
# p = s + 0.1
|
||||||
|
probability = similarity + 0.1
|
||||||
|
return min(1.0, max(0.0, probability))
|
||||||
|
|
||||||
|
|
||||||
class SubMind:
|
class SubMind:
|
||||||
def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: Observation):
|
def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: Observation):
|
||||||
self.subheartflow_id = subheartflow_id
|
self.subheartflow_id = subheartflow_id
|
||||||
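`calculate_similarity` and `calculate_replacement_probability` above map a difflib similarity ratio onto a piecewise replacement probability (0 up to 0.4, 0.7 at 0.6, 1.0 from 0.9). Below is a small self-contained check of that mapping on a pair of sample thoughts; it restates the same piecewise formula rather than importing the module, since the file path is not shown in this excerpt, and the sample strings are invented.

```python
import difflib


def replacement_probability(similarity: float) -> float:
    # Same piecewise rule as calculate_replacement_probability above:
    # 0 up to 0.4, then linear to 0.7 at 0.6, then linear to 1.0 at 0.9.
    if similarity <= 0.4:
        return 0.0
    if similarity >= 0.9:
        return 1.0
    if similarity <= 0.6:
        return max(0.0, 3.5 * similarity - 1.4)
    return min(1.0, similarity + 0.1)


previous_thought = "今天群里在聊新番,我想插一句"
new_thought = "今天群里还在聊新番,我还是想插一句"
sim = difflib.SequenceMatcher(None, previous_thought, new_thought).ratio()
print(f"similarity={sim:.2f}, replacement probability={replacement_probability(sim):.2f}")
```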
@@ -80,7 +117,7 @@ class SubMind:
|
|||||||
|
|
||||||
# ---------- 1. 准备基础数据 ----------
|
# ---------- 1. 准备基础数据 ----------
|
||||||
# 获取现有想法和情绪状态
|
# 获取现有想法和情绪状态
|
||||||
current_thinking_info = self.current_mind
|
previous_mind = self.current_mind if self.current_mind else ""
|
||||||
mood_info = self.chat_state.mood
|
mood_info = self.chat_state.mood
|
||||||
|
|
||||||
# 获取观察对象
|
# 获取观察对象
|
||||||
@@ -92,6 +129,7 @@ class SubMind:
|
|||||||
|
|
||||||
# 获取观察内容
|
# 获取观察内容
|
||||||
chat_observe_info = observation.get_observe_info()
|
chat_observe_info = observation.get_observe_info()
|
||||||
|
person_list = observation.person_list
|
||||||
|
|
||||||
# ---------- 2. 准备工具和个性化数据 ----------
|
# ---------- 2. 准备工具和个性化数据 ----------
|
||||||
# 初始化工具
|
# 初始化工具
|
||||||
@@ -101,6 +139,13 @@ class SubMind:
|
|||||||
# 获取个性化信息
|
# 获取个性化信息
|
||||||
individuality = Individuality.get_instance()
|
individuality = Individuality.get_instance()
|
||||||
|
|
||||||
|
relation_prompt = ""
|
||||||
|
print(f"person_list: {person_list}")
|
||||||
|
for person in person_list:
|
||||||
|
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
|
||||||
|
|
||||||
|
print(f"relat22222ion_prompt: {relation_prompt}")
|
||||||
|
|
||||||
# 构建个性部分
|
# 构建个性部分
|
||||||
prompt_personality = individuality.get_prompt(x_person=2, level=2)
|
prompt_personality = individuality.get_prompt(x_person=2, level=2)
|
||||||
|
|
||||||
@@ -136,9 +181,9 @@ class SubMind:
|
|||||||
last_reasoning = ""
|
last_reasoning = ""
|
||||||
is_replan = False
|
is_replan = False
|
||||||
if_replan_prompt = ""
|
if_replan_prompt = ""
|
||||||
if current_thinking_info:
|
if previous_mind:
|
||||||
last_loop_prompt = (await global_prompt_manager.get_prompt_async("last_loop")).format(
|
last_loop_prompt = (await global_prompt_manager.get_prompt_async("last_loop")).format(
|
||||||
current_thinking_info=current_thinking_info, if_replan_prompt=if_replan_prompt
|
current_thinking_info=previous_mind, if_replan_prompt=if_replan_prompt
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
last_loop_prompt = ""
|
last_loop_prompt = ""
|
||||||
@@ -196,6 +241,7 @@ class SubMind:
|
|||||||
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
|
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
|
||||||
extra_info="", # 可以在这里添加额外信息
|
extra_info="", # 可以在这里添加额外信息
|
||||||
prompt_personality=prompt_personality,
|
prompt_personality=prompt_personality,
|
||||||
|
relation_prompt=relation_prompt,
|
||||||
bot_name=individuality.name,
|
bot_name=individuality.name,
|
||||||
time_now=time_now,
|
time_now=time_now,
|
||||||
chat_observe_info=chat_observe_info,
|
chat_observe_info=chat_observe_info,
|
||||||
@@ -205,8 +251,6 @@ class SubMind:
|
|||||||
cycle_info_block=cycle_info_block,
|
cycle_info_block=cycle_info_block,
|
||||||
)
|
)
|
||||||
|
|
||||||
# logger.debug(f"[{self.subheartflow_id}] 心流思考提示词构建完成")
|
|
||||||
|
|
||||||
# ---------- 5. 执行LLM请求并处理响应 ----------
|
# ---------- 5. 执行LLM请求并处理响应 ----------
|
||||||
content = "" # 初始化内容变量
|
content = "" # 初始化内容变量
|
||||||
_reasoning_content = "" # 初始化推理内容变量
|
_reasoning_content = "" # 初始化推理内容变量
|
||||||
@@ -240,7 +284,7 @@ class SubMind:
|
|||||||
elif not success:
|
elif not success:
|
||||||
logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}")
|
logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}")
|
||||||
else:
|
else:
|
||||||
logger.info(f"{self.log_prefix} 心流未使用工具") # 修改日志信息,明确是未使用工具而不是未处理
|
logger.info(f"{self.log_prefix} 心流未使用工具")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
# 处理总体异常
|
# 处理总体异常
|
||||||
@@ -248,15 +292,89 @@ class SubMind:
|
|||||||
logger.error(traceback.format_exc())
|
logger.error(traceback.format_exc())
|
||||||
content = "思考过程中出现错误"
|
content = "思考过程中出现错误"
|
||||||
|
|
||||||
# 记录最终思考结果
|
# 记录初步思考结果
|
||||||
logger.debug(f"{self.log_prefix} \nPrompt:\n{prompt}\n\n心流思考结果:\n{content}\n")
|
logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {prompt}\n")
|
||||||
|
|
||||||
# 处理空响应情况
|
# 处理空响应情况
|
||||||
if not content:
|
if not content:
|
||||||
content = "(不知道该想些什么...)"
|
content = "(不知道该想些什么...)"
|
||||||
logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。")
|
logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。")
|
||||||
|
|
||||||
# ---------- 6. 更新思考状态并返回结果 ----------
|
# ---------- 6. 应用概率性去重和修饰 ----------
|
||||||
|
new_content = content # 保存 LLM 直接输出的结果
|
||||||
|
try:
|
||||||
|
similarity = calculate_similarity(previous_mind, new_content)
|
||||||
|
replacement_prob = calculate_replacement_probability(similarity)
|
||||||
|
logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}")
|
||||||
|
|
||||||
|
# 定义词语列表 (移到判断之前)
|
||||||
|
yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"]
|
||||||
|
zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"]
|
||||||
|
cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"]
|
||||||
|
zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao
|
||||||
|
|
||||||
|
if random.random() < replacement_prob:
|
||||||
|
# 相似度非常高时,尝试去重或特殊处理
|
||||||
|
if similarity == 1.0:
|
||||||
|
logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...")
|
||||||
|
# 随机截取大约一半内容
|
||||||
|
if len(new_content) > 1: # 避免内容过短无法截取
|
||||||
|
split_point = max(
|
||||||
|
1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4)
|
||||||
|
)
|
||||||
|
truncated_content = new_content[:split_point]
|
||||||
|
else:
|
||||||
|
truncated_content = new_content # 如果只有一个字符或者为空,就不截取了
|
||||||
|
|
||||||
|
# 添加语气词和转折/承接词
|
||||||
|
yu_qi_ci = random.choice(yu_qi_ci_liebiao)
|
||||||
|
zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao)
|
||||||
|
content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}"
|
||||||
|
logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}")
|
||||||
|
|
||||||
|
else:
|
||||||
|
# 相似度较高但非100%,执行标准去重逻辑
|
||||||
|
logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...")
|
||||||
|
matcher = difflib.SequenceMatcher(None, previous_mind, new_content)
|
||||||
|
deduplicated_parts = []
|
||||||
|
last_match_end_in_b = 0
|
||||||
|
for _i, j, n in matcher.get_matching_blocks():
|
||||||
|
if last_match_end_in_b < j:
|
||||||
|
deduplicated_parts.append(new_content[last_match_end_in_b:j])
|
||||||
|
last_match_end_in_b = j + n
|
||||||
|
|
||||||
|
deduplicated_content = "".join(deduplicated_parts).strip()
|
||||||
|
|
||||||
|
if deduplicated_content:
|
||||||
|
# 根据概率决定是否添加词语
|
||||||
|
prefix_str = ""
|
||||||
|
if random.random() < 0.3: # 30% 概率添加语气词
|
||||||
|
prefix_str += random.choice(yu_qi_ci_liebiao)
|
||||||
|
if random.random() < 0.7: # 70% 概率添加转折/承接词
|
||||||
|
prefix_str += random.choice(zhuan_jie_ci_liebiao)
|
||||||
|
|
||||||
|
# 组合最终结果
|
||||||
|
if prefix_str:
|
||||||
|
content = f"{prefix_str},{deduplicated_content}" # 更新 content
|
||||||
|
logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}")
|
||||||
|
else:
|
||||||
|
content = deduplicated_content # 更新 content
|
||||||
|
logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}")
|
||||||
|
else:
|
||||||
|
logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}")
|
||||||
|
content = new_content # 保留原始 content
|
||||||
|
else:
|
||||||
|
logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})")
|
||||||
|
# content 保持 new_content 不变
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}")
|
||||||
|
logger.error(traceback.format_exc())
|
||||||
|
# 出错时保留原始 content
|
||||||
|
content = new_content
|
||||||
|
|
||||||
|
# ---------- 7. 更新思考状态并返回结果 ----------
|
||||||
|
logger.info(f"{self.log_prefix} 最终心流思考结果: {content}")
|
||||||
# 更新当前思考内容
|
# 更新当前思考内容
|
||||||
self.update_current_mind(content)
|
self.update_current_mind(content)
|
||||||
|
|
||||||
|
|||||||
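The probabilistic de-duplication above keeps only the spans of the new thought that difflib does not match against the previous one, by walking `SequenceMatcher.get_matching_blocks()` and collecting the gaps on the new-text side. A minimal standalone sketch of that core step (sample strings are invented; this is an illustration, not the project code):

```python
import difflib


def strip_repeated_parts(previous: str, new: str) -> str:
    """Keep only the spans of `new` that are not matched against `previous`."""
    matcher = difflib.SequenceMatcher(None, previous, new)
    parts = []
    last_end_in_new = 0
    for _a, b, size in matcher.get_matching_blocks():
        # new[b:b+size] matches something in `previous`; keep the gap before it
        if last_end_in_new < b:
            parts.append(new[last_end_in_new:b])
        last_end_in_new = b + size
    return "".join(parts).strip()


print(strip_repeated_parts(
    "我觉得这个话题很有意思",
    "我觉得这个话题很有意思,不过差不多该换个话题了",
))
# prints only the non-repeated tail; the exact split depends on difflib's matching blocks
```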
@@ -264,6 +264,13 @@ class SubHeartflowManager:
|
|||||||
current_state = self.mai_state_info.get_current_state()
|
current_state = self.mai_state_info.get_current_state()
|
||||||
focused_limit = current_state.get_focused_chat_max_num()
|
focused_limit = current_state.get_focused_chat_max_num()
|
||||||
|
|
||||||
|
# --- 新增:检查是否允许进入 FOCUS 模式 --- #
|
||||||
|
if not global_config.allow_focus_mode:
|
||||||
|
if int(time.time()) % 60 == 0: # 每60秒输出一次日志避免刷屏
|
||||||
|
logger.debug(f"{log_prefix} 配置不允许进入 FOCUSED 状态 (allow_focus_mode=False)")
|
||||||
|
return # 如果不允许,直接返回
|
||||||
|
# --- 结束新增 ---
|
||||||
|
|
||||||
logger.debug(f"{log_prefix} 当前状态 ({current_state.value}) 开始尝试提升到FOCUSED状态")
|
logger.debug(f"{log_prefix} 当前状态 ({current_state.value}) 开始尝试提升到FOCUSED状态")
|
||||||
|
|
||||||
if int(time.time()) % 20 == 0: # 每20秒输出一次
|
if int(time.time()) % 20 == 0: # 每20秒输出一次
|
||||||
|
|||||||
@@ -81,6 +81,24 @@ block_and_ignore: 更加极端的结束对话方式,直接结束对话并在
|
|||||||
|
|
||||||
注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
|
注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
|
||||||
|
|
||||||
|
# 新增:Prompt(3): 决定是否在结束对话前发送告别语
|
||||||
|
PROMPT_END_DECISION = """{persona_text}。刚刚你决定结束一场 QQ 私聊。
|
||||||
|
|
||||||
|
【你们之前的聊天记录】
|
||||||
|
{chat_history_text}
|
||||||
|
|
||||||
|
你觉得你们的对话已经完整结束了吗?有时候,在对话自然结束后再说点什么可能会有点奇怪,但有时也可能需要一条简短的消息来圆满结束。
|
||||||
|
如果觉得确实有必要再发一条简短、自然、符合你人设的告别消息(比如 "好,下次再聊~" 或 "嗯,先这样吧"),就输出 "yes"。
|
||||||
|
如果觉得当前状态下直接结束对话更好,没有必要再发消息,就输出 "no"。
|
||||||
|
|
||||||
|
请以 JSON 格式输出你的选择:
|
||||||
|
{{
|
||||||
|
"say_bye": "yes/no",
|
||||||
|
"reason": "选择 yes 或 no 的原因和内心想法 (简要说明)"
|
||||||
|
}}
|
||||||
|
|
||||||
|
注意:请严格按照 JSON 格式输出,不要包含任何其他内容。"""
|
||||||
|
|
||||||
|
|
||||||
# ActionPlanner 类定义,顶格
|
# ActionPlanner 类定义,顶格
|
||||||
class ActionPlanner:
|
class ActionPlanner:
|
||||||
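PROMPT_END_DECISION asks the model for a strict JSON object with `say_bye` and `reason`, which the planner then parses via the project's `get_items_from_json` helper (its call signature is visible further down). As a generic, non-authoritative illustration of the same defensive-parsing idea, not that helper's implementation, a minimal fallback parser might look like this:

```python
import json


def parse_end_decision(raw: str) -> dict:
    """Defensive parse of the {"say_bye": ..., "reason": ...} reply.
    Illustration only; the project itself uses its get_items_from_json helper."""
    defaults = {"say_bye": "no", "reason": "unparseable LLM output, defaulting to no farewell"}
    start, end = raw.find("{"), raw.rfind("}")
    if start == -1 or end <= start:
        return defaults
    try:
        data = json.loads(raw[start:end + 1])
    except ValueError:
        return defaults
    return {
        "say_bye": str(data.get("say_bye", defaults["say_bye"])).strip().lower(),
        "reason": str(data.get("reason", defaults["reason"])),
    }


print(parse_end_decision('{"say_bye": "yes", "reason": "想简单道个别"}'))
```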
@@ -336,9 +354,10 @@ class ActionPlanner:
|
|||||||
logger.debug(f"[私聊][{self.private_name}]发送到LLM的最终提示词:\n------\n{prompt}\n------")
|
logger.debug(f"[私聊][{self.private_name}]发送到LLM的最终提示词:\n------\n{prompt}\n------")
|
||||||
try:
|
try:
|
||||||
content, _ = await self.llm.generate_response_async(prompt)
|
content, _ = await self.llm.generate_response_async(prompt)
|
||||||
logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}")
|
logger.debug(f"[私聊][{self.private_name}]LLM (行动规划) 原始返回内容: {content}")
|
||||||
|
|
||||||
success, result = get_items_from_json(
|
# --- 初始行动规划解析 ---
|
||||||
|
success, initial_result = get_items_from_json(
|
||||||
content,
|
content,
|
||||||
self.private_name,
|
self.private_name,
|
||||||
"action",
|
"action",
|
||||||
@@ -346,30 +365,90 @@ class ActionPlanner:
|
|||||||
default_values={"action": "wait", "reason": "LLM返回格式错误或未提供原因,默认等待"},
|
default_values={"action": "wait", "reason": "LLM返回格式错误或未提供原因,默认等待"},
|
||||||
)
|
)
|
||||||
|
|
||||||
action = result.get("action", "wait")
|
initial_action = initial_result.get("action", "wait")
|
||||||
reason = result.get("reason", "LLM未提供原因,默认等待")
|
initial_reason = initial_result.get("reason", "LLM未提供原因,默认等待")
|
||||||
|
|
||||||
# 验证action类型
|
# 检查是否需要进行结束对话决策 ---
|
||||||
# 更新 valid_actions 列表以包含 send_new_message
|
if initial_action == "end_conversation":
|
||||||
valid_actions = [
|
logger.info(f"[私聊][{self.private_name}]初步规划结束对话,进入告别决策...")
|
||||||
"direct_reply",
|
|
||||||
"send_new_message", # 添加新动作
|
|
||||||
"fetch_knowledge",
|
|
||||||
"wait",
|
|
||||||
"listening",
|
|
||||||
"rethink_goal",
|
|
||||||
"end_conversation",
|
|
||||||
"block_and_ignore",
|
|
||||||
]
|
|
||||||
if action not in valid_actions:
|
|
||||||
logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait")
|
|
||||||
reason = f"(原始行动'{action}'无效,已强制改为wait) {reason}"
|
|
||||||
action = "wait"
|
|
||||||
|
|
||||||
logger.info(f"[私聊][{self.private_name}]规划的行动: {action}")
|
# 使用新的 PROMPT_END_DECISION
|
||||||
logger.info(f"[私聊][{self.private_name}]行动原因: {reason}")
|
end_decision_prompt = PROMPT_END_DECISION.format(
|
||||||
return action, reason
|
persona_text=persona_text, # 复用之前的 persona_text
|
||||||
|
chat_history_text=chat_history_text, # 复用之前的 chat_history_text
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.debug(
|
||||||
|
f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------"
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM
|
||||||
|
logger.debug(f"[私聊][{self.private_name}]LLM (结束决策) 原始返回内容: {end_content}")
|
||||||
|
|
||||||
|
# 解析结束决策的JSON
|
||||||
|
end_success, end_result = get_items_from_json(
|
||||||
|
end_content,
|
||||||
|
self.private_name,
|
||||||
|
"say_bye",
|
||||||
|
"reason",
|
||||||
|
default_values={"say_bye": "no", "reason": "结束决策LLM返回格式错误,默认不告别"},
|
||||||
|
required_types={"say_bye": str, "reason": str}, # 明确类型
|
||||||
|
)
|
||||||
|
|
||||||
|
say_bye_decision = end_result.get("say_bye", "no").lower() # 转小写方便比较
|
||||||
|
end_decision_reason = end_result.get("reason", "未提供原因")
|
||||||
|
|
||||||
|
if end_success and say_bye_decision == "yes":
|
||||||
|
# 决定要告别,返回新的 'say_goodbye' 动作
|
||||||
|
logger.info(
|
||||||
|
f"[私聊][{self.private_name}]结束决策: yes, 准备生成告别语. 原因: {end_decision_reason}"
|
||||||
|
)
|
||||||
|
# 注意:这里的 reason 可以考虑拼接初始原因和结束决策原因,或者只用结束决策原因
|
||||||
|
final_action = "say_goodbye"
|
||||||
|
final_reason = f"决定发送告别语。决策原因: {end_decision_reason} (原结束理由: {initial_reason})"
|
||||||
|
return final_action, final_reason
|
||||||
|
else:
|
||||||
|
# 决定不告别 (包括解析失败或明确说no)
|
||||||
|
logger.info(
|
||||||
|
f"[私聊][{self.private_name}]结束决策: no, 直接结束对话. 原因: {end_decision_reason}"
|
||||||
|
)
|
||||||
|
# 返回原始的 'end_conversation' 动作
|
||||||
|
final_action = "end_conversation"
|
||||||
|
final_reason = initial_reason # 保持原始的结束理由
|
||||||
|
return final_action, final_reason
|
||||||
|
|
||||||
|
except Exception as end_e:
|
||||||
|
logger.error(f"[私聊][{self.private_name}]调用结束决策LLM或处理结果时出错: {str(end_e)}")
|
||||||
|
# 出错时,默认执行原始的结束对话
|
||||||
|
logger.warning(f"[私聊][{self.private_name}]结束决策出错,将按原计划执行 end_conversation")
|
||||||
|
return "end_conversation", initial_reason # 返回原始动作和原因
|
||||||
|
|
||||||
|
else:
|
||||||
|
action = initial_action
|
||||||
|
reason = initial_reason
|
||||||
|
|
||||||
|
# 验证action类型 (保持不变)
|
||||||
|
valid_actions = [
|
||||||
|
"direct_reply",
|
||||||
|
"send_new_message",
|
||||||
|
"fetch_knowledge",
|
||||||
|
"wait",
|
||||||
|
"listening",
|
||||||
|
"rethink_goal",
|
||||||
|
"end_conversation", # 仍然需要验证,因为可能从上面决策后返回
|
||||||
|
"block_and_ignore",
|
||||||
|
"say_goodbye", # 也要验证这个新动作
|
||||||
|
]
|
||||||
|
if action not in valid_actions:
|
||||||
|
logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait")
|
||||||
|
reason = f"(原始行动'{action}'无效,已强制改为wait) {reason}"
|
||||||
|
action = "wait"
|
||||||
|
|
||||||
|
logger.info(f"[私聊][{self.private_name}]规划的行动: {action}")
|
||||||
|
logger.info(f"[私聊][{self.private_name}]行动原因: {reason}")
|
||||||
|
return action, reason
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
|
# 外层异常处理保持不变
|
||||||
logger.error(f"[私聊][{self.private_name}]规划行动时调用 LLM 或处理结果出错: {str(e)}")
|
logger.error(f"[私聊][{self.private_name}]规划行动时调用 LLM 或处理结果出错: {str(e)}")
|
||||||
return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}"
|
return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}"
|
||||||
|
|||||||
@@ -564,10 +564,48 @@ class Conversation:
|
|||||||
)
|
)
|
||||||
self.conversation_info.last_successful_reply_action = None # 重置状态
|
self.conversation_info.last_successful_reply_action = None # 重置状态
|
||||||
|
|
||||||
|
elif action == "say_goodbye":
|
||||||
|
self.state = ConversationState.GENERATING # 也可以定义一个新的状态,如 ENDING
|
||||||
|
logger.info(f"[私聊][{self.private_name}]执行行动: 生成并发送告别语...")
|
||||||
|
try:
|
||||||
|
# 1. 生成告别语 (使用 'say_goodbye' action_type)
|
||||||
|
self.generated_reply = await self.reply_generator.generate(
|
||||||
|
observation_info, conversation_info, action_type="say_goodbye"
|
||||||
|
)
|
||||||
|
logger.info(f"[私聊][{self.private_name}]生成的告别语: {self.generated_reply}")
|
||||||
|
|
||||||
|
# 2. 直接发送告别语 (不经过检查)
|
||||||
|
if self.generated_reply: # 确保生成了内容
|
||||||
|
await self._send_reply() # 调用发送方法
|
||||||
|
# 发送成功后,标记动作成功
|
||||||
|
action_successful = True
|
||||||
|
logger.info(f"[私聊][{self.private_name}]告别语已发送。")
|
||||||
|
else:
|
||||||
|
logger.warning(f"[私聊][{self.private_name}]未能生成告别语内容,无法发送。")
|
||||||
|
action_successful = False # 标记动作失败
|
||||||
|
conversation_info.done_action[action_index].update(
|
||||||
|
{"status": "recall", "final_reason": "未能生成告别语内容"}
|
||||||
|
)
|
||||||
|
|
||||||
|
# 3. 无论是否发送成功,都准备结束对话
|
||||||
|
self.should_continue = False
|
||||||
|
logger.info(f"[私聊][{self.private_name}]发送告别语流程结束,即将停止对话实例。")
|
||||||
|
|
||||||
|
except Exception as goodbye_err:
|
||||||
|
logger.error(f"[私聊][{self.private_name}]生成或发送告别语时出错: {goodbye_err}")
|
||||||
|
logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
|
||||||
|
# 即使出错,也结束对话
|
||||||
|
self.should_continue = False
|
||||||
|
action_successful = False # 标记动作失败
|
||||||
|
conversation_info.done_action[action_index].update(
|
||||||
|
{"status": "recall", "final_reason": f"生成或发送告别语时出错: {goodbye_err}"}
|
||||||
|
)
|
||||||
|
|
||||||
elif action == "end_conversation":
|
elif action == "end_conversation":
|
||||||
|
# 这个分支现在只会在 action_planner 最终决定不告别时被调用
|
||||||
self.should_continue = False
|
self.should_continue = False
|
||||||
logger.info(f"[私聊][{self.private_name}]决定结束对话...")
|
logger.info(f"[私聊][{self.private_name}]收到最终结束指令,停止对话...")
|
||||||
action_successful = True # 标记动作成功
|
action_successful = True # 标记这个指令本身是成功的
|
||||||
|
|
||||||
elif action == "block_and_ignore":
|
elif action == "block_and_ignore":
|
||||||
logger.info(f"[私聊][{self.private_name}]不想再理你了...")
|
logger.info(f"[私聊][{self.private_name}]不想再理你了...")
|
||||||
|
|||||||
@@ -57,6 +57,24 @@ PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊
|
|||||||
|
|
||||||
请直接输出回复内容,不需要任何额外格式。"""
|
请直接输出回复内容,不需要任何额外格式。"""
|
||||||
|
|
||||||
|
# Prompt for say_goodbye (告别语生成)
|
||||||
|
PROMPT_FAREWELL = """{persona_text}。你在参与一场 QQ 私聊,现在对话似乎已经结束,你决定再发一条最后的消息来圆满结束。
|
||||||
|
|
||||||
|
最近的聊天记录:
|
||||||
|
{chat_history_text}
|
||||||
|
|
||||||
|
请根据上述信息,结合聊天记录,构思一条**简短、自然、符合你人设**的最后的消息。
|
||||||
|
这条消息应该:
|
||||||
|
1. 从你自己的角度发言。
|
||||||
|
2. 符合你的性格特征和身份细节。
|
||||||
|
3. 通俗易懂,自然流畅,通常很简短。
|
||||||
|
4. 自然地为这场对话画上句号,避免开启新话题或显得冗长、刻意。
|
||||||
|
|
||||||
|
请像真人一样随意自然,**简洁是关键**。
|
||||||
|
不要输出多余内容(包括前后缀、冒号、引号、括号、表情包、at或@等)。
|
||||||
|
|
||||||
|
请直接输出最终的告别消息内容,不需要任何额外格式。"""
|
||||||
|
|
||||||
|
|
||||||
class ReplyGenerator:
|
class ReplyGenerator:
|
||||||
"""回复生成器"""
|
"""回复生成器"""
|
||||||
@@ -135,7 +153,10 @@ class ReplyGenerator:
|
|||||||
if action_type == "send_new_message":
|
if action_type == "send_new_message":
|
||||||
prompt_template = PROMPT_SEND_NEW_MESSAGE
|
prompt_template = PROMPT_SEND_NEW_MESSAGE
|
||||||
logger.info(f"[私聊][{self.private_name}]使用 PROMPT_SEND_NEW_MESSAGE (追问生成)")
|
logger.info(f"[私聊][{self.private_name}]使用 PROMPT_SEND_NEW_MESSAGE (追问生成)")
|
||||||
else: # 默认使用 direct_reply 的 prompt
|
elif action_type == "say_goodbye": # 处理告别动作
|
||||||
|
prompt_template = PROMPT_FAREWELL
|
||||||
|
logger.info(f"[私聊][{self.private_name}]使用 PROMPT_FAREWELL (告别语生成)")
|
||||||
|
else: # 默认使用 direct_reply 的 prompt (包括 'direct_reply' 或其他未明确处理的类型)
|
||||||
prompt_template = PROMPT_DIRECT_REPLY
|
prompt_template = PROMPT_DIRECT_REPLY
|
||||||
logger.info(f"[私聊][{self.private_name}]使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)")
|
logger.info(f"[私聊][{self.private_name}]使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)")
|
||||||
|
|
||||||
|
|||||||
@@ -22,7 +22,8 @@ logger = get_logger("emoji")
 BASE_DIR = os.path.join("data")
 EMOJI_DIR = os.path.join(BASE_DIR, "emoji") # 表情包存储目录
 EMOJI_REGISTED_DIR = os.path.join(BASE_DIR, "emoji_registed") # 已注册的表情包目录
-MAX_EMOJI_FOR_PROMPT = 20 # 最大表情包描述于图片替换的 prompt 中
+MAX_EMOJI_FOR_PROMPT = 20 # 最大允许的表情包描述数量于图片替换的 prompt 中
+
 
 """
 还没经过测试,有些地方数据库和内存数据同步可能不完全
@@ -1,6 +1,7 @@
 import asyncio
 import time
 import traceback
+import random # <--- 添加导入
 from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
 from collections import deque
 from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending
@@ -31,6 +32,8 @@ from src.individuality.individuality import Individuality
|
|||||||
|
|
||||||
INITIAL_DURATION = 60.0
|
INITIAL_DURATION = 60.0
|
||||||
|
|
||||||
|
WAITING_TIME_THRESHOLD = 300 # 等待新消息时间阈值,单位秒
|
||||||
|
|
||||||
|
|
||||||
logger = get_logger("interest") # Logger Name Changed
|
logger = get_logger("interest") # Logger Name Changed
|
||||||
|
|
||||||
@@ -45,10 +48,11 @@ class ActionManager:
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
# 初始化为默认动作集
|
# 初始化为默认动作集
|
||||||
self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy()
|
self._available_actions: Dict[str, str] = DEFAULT_ACTIONS.copy()
|
||||||
|
self._original_actions_backup: Optional[Dict[str, str]] = None # 用于临时移除时的备份
|
||||||
|
|
||||||
def get_available_actions(self) -> Dict[str, str]:
|
def get_available_actions(self) -> Dict[str, str]:
|
||||||
"""获取当前可用的动作集"""
|
"""获取当前可用的动作集"""
|
||||||
return self._available_actions
|
return self._available_actions.copy() # 返回副本以防外部修改
|
||||||
|
|
||||||
def add_action(self, action_name: str, description: str) -> bool:
|
def add_action(self, action_name: str, description: str) -> bool:
|
||||||
"""
|
"""
|
||||||
@@ -81,6 +85,30 @@ class ActionManager:
|
|||||||
del self._available_actions[action_name]
|
del self._available_actions[action_name]
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
def temporarily_remove_actions(self, actions_to_remove: List[str]):
|
||||||
|
"""
|
||||||
|
临时移除指定的动作,备份原始动作集。
|
||||||
|
如果已经有备份,则不重复备份。
|
||||||
|
"""
|
||||||
|
if self._original_actions_backup is None:
|
||||||
|
self._original_actions_backup = self._available_actions.copy()
|
||||||
|
|
||||||
|
actions_actually_removed = []
|
||||||
|
for action_name in actions_to_remove:
|
||||||
|
if action_name in self._available_actions:
|
||||||
|
del self._available_actions[action_name]
|
||||||
|
actions_actually_removed.append(action_name)
|
||||||
|
# logger.debug(f"临时移除了动作: {actions_actually_removed}") # 可选日志
|
||||||
|
|
||||||
|
def restore_actions(self):
|
||||||
|
"""
|
||||||
|
恢复之前备份的原始动作集。
|
||||||
|
"""
|
||||||
|
if self._original_actions_backup is not None:
|
||||||
|
self._available_actions = self._original_actions_backup.copy()
|
||||||
|
self._original_actions_backup = None
|
||||||
|
# logger.debug("恢复了原始动作集") # 可选日志
|
||||||
|
|
||||||
def clear_actions(self):
|
def clear_actions(self):
|
||||||
"""清空所有动作"""
|
"""清空所有动作"""
|
||||||
self._available_actions.clear()
|
self._available_actions.clear()
|
||||||
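`temporarily_remove_actions` above snapshots the current action set once and deletes the named entries; `restore_actions` puts the snapshot back. A minimal usage sketch against a stripped-down stand-in class follows (the action names and descriptions are invented for illustration; the planner further down builds an `actions_to_remove_temporarily` list with `text_reply` and `emoji_reply` in the same spirit):

```python
from typing import Dict, List, Optional


class MiniActionManager:
    """Stripped-down stand-in that only reproduces the backup/restore behaviour above."""

    def __init__(self, actions: Dict[str, str]):
        self._available_actions = dict(actions)
        self._original_actions_backup: Optional[Dict[str, str]] = None

    def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None:
        if self._original_actions_backup is None:        # back up only once
            self._original_actions_backup = self._available_actions.copy()
        for name in actions_to_remove:
            self._available_actions.pop(name, None)

    def restore_actions(self) -> None:
        if self._original_actions_backup is not None:
            self._available_actions = self._original_actions_backup.copy()
            self._original_actions_backup = None


mgr = MiniActionManager({"text_reply": "reply with text", "emoji_reply": "send an emoji", "no_reply": "stay silent"})
mgr.temporarily_remove_actions(["text_reply", "emoji_reply"])  # e.g. after several consecutive text replies
print(sorted(mgr._available_actions))  # ['no_reply']
mgr.restore_actions()
print(sorted(mgr._available_actions))  # ['emoji_reply', 'no_reply', 'text_reply']
```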
@@ -151,7 +179,7 @@ class HeartFChatting:
 其生命周期现在由其关联的 SubHeartflow 的 FOCUSED 状态控制。
 """
 
-CONSECUTIVE_NO_REPLY_THRESHOLD = 5 # 连续不回复的阈值
+CONSECUTIVE_NO_REPLY_THRESHOLD = 3 # 连续不回复的阈值
 
 def __init__(
 self,
@@ -214,6 +242,7 @@ class HeartFChatting:
|
|||||||
self._current_cycle: Optional[CycleInfo] = None
|
self._current_cycle: Optional[CycleInfo] = None
|
||||||
self._lian_xu_bu_hui_fu_ci_shu: int = 0 # <--- 新增:连续不回复计数器
|
self._lian_xu_bu_hui_fu_ci_shu: int = 0 # <--- 新增:连续不回复计数器
|
||||||
self._shutting_down: bool = False # <--- 新增:关闭标志位
|
self._shutting_down: bool = False # <--- 新增:关闭标志位
|
||||||
|
self._lian_xu_deng_dai_shi_jian: float = 0.0 # <--- 新增:累计等待时间
|
||||||
|
|
||||||
async def _initialize(self) -> bool:
|
async def _initialize(self) -> bool:
|
||||||
"""
|
"""
|
||||||
@@ -489,6 +518,7 @@ class HeartFChatting:
|
|||||||
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
|
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
|
||||||
# 出错时也重置计数器
|
# 出错时也重置计数器
|
||||||
self._lian_xu_bu_hui_fu_ci_shu = 0
|
self._lian_xu_bu_hui_fu_ci_shu = 0
|
||||||
|
self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间
|
||||||
return False, ""
|
return False, ""
|
||||||
|
|
||||||
async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]:
|
async def _handle_text_reply(self, reasoning: str, emoji_query: str, cycle_timers: dict) -> tuple[bool, str]:
|
||||||
@@ -511,6 +541,7 @@ class HeartFChatting:
|
|||||||
"""
|
"""
|
||||||
# 重置连续不回复计数器
|
# 重置连续不回复计数器
|
||||||
self._lian_xu_bu_hui_fu_ci_shu = 0
|
self._lian_xu_bu_hui_fu_ci_shu = 0
|
||||||
|
self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间
|
||||||
|
|
||||||
# 获取锚点消息
|
# 获取锚点消息
|
||||||
anchor_message = await self._get_anchor_message()
|
anchor_message = await self._get_anchor_message()
|
||||||
@@ -566,6 +597,7 @@ class HeartFChatting:
|
|||||||
bool: 是否发送成功
|
bool: 是否发送成功
|
||||||
"""
|
"""
|
||||||
logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}")
|
logger.info(f"{self.log_prefix} 决定回复表情({emoji_query}): {reasoning}")
|
||||||
|
self._lian_xu_deng_dai_shi_jian = 0.0 # 重置累计等待时间(即使不计数也保持一致性)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
anchor = await self._get_anchor_message()
|
anchor = await self._get_anchor_message()
|
||||||
@@ -601,23 +633,41 @@ class HeartFChatting:
         observation = self.observations[0] if self.observations else None

         try:
+            dang_qian_deng_dai = 0.0  # 初始化本次等待时间
             with Timer("等待新消息", cycle_timers):
                 # 等待新消息、超时或关闭信号,并获取结果
                 await self._wait_for_new_message(observation, planner_start_db_time, self.log_prefix)
+                # 从计时器获取实际等待时间
+                dang_qian_deng_dai = cycle_timers.get("等待新消息", 0.0)

             if not self._shutting_down:
                 self._lian_xu_bu_hui_fu_ci_shu += 1
+                self._lian_xu_deng_dai_shi_jian += dang_qian_deng_dai  # 累加等待时间
                 logger.debug(
-                    f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}"
+                    f"{self.log_prefix} 连续不回复计数增加: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}, "
+                    f"本次等待: {dang_qian_deng_dai:.2f}秒, 累计等待: {self._lian_xu_deng_dai_shi_jian:.2f}秒"
                 )

-                # 检查是否达到阈值
-                if self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD:
+                # 检查是否同时达到次数和时间阈值
+                time_threshold = 0.66 * WAITING_TIME_THRESHOLD * self.CONSECUTIVE_NO_REPLY_THRESHOLD
+                if (
+                    self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD
+                    and self._lian_xu_deng_dai_shi_jian >= time_threshold
+                ):
                     logger.info(
-                        f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次),调用回调请求状态转换"
+                        f"{self.log_prefix} 连续不回复达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) "
+                        f"且累计等待时间达到 {self._lian_xu_deng_dai_shi_jian:.2f}秒 (阈值 {time_threshold}秒),"
+                        f"调用回调请求状态转换"
                     )
-                    # 调用回调。注意:这里不重置计数器,依赖回调函数成功改变状态来隐式重置上下文。
+                    # 调用回调。注意:这里不重置计数器和时间,依赖回调函数成功改变状态来隐式重置上下文。
                     await self.on_consecutive_no_reply_callback()
+                elif self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD:
+                    # 仅次数达到阈值,但时间未达到
+                    logger.debug(
+                        f"{self.log_prefix} 连续不回复次数达到阈值 ({self._lian_xu_bu_hui_fu_ci_shu}次) "
+                        f"但累计等待时间 {self._lian_xu_deng_dai_shi_jian:.2f}秒 未达到时间阈值 ({time_threshold}秒),暂不调用回调"
+                    )
+                # else: 次数和时间都未达到阈值,不做处理

             return True

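For reference, the gating this hunk introduces can be read in isolation as follows: the state-transition callback now fires only when both the consecutive no-reply count and the accumulated wait time cross their thresholds. A minimal standalone Python sketch; the two constants are illustrative assumptions, not the project's configured values:

# Minimal sketch of the dual-threshold check (constant values are assumptions).
CONSECUTIVE_NO_REPLY_THRESHOLD = 3   # assumed
WAITING_TIME_THRESHOLD = 300         # assumed, seconds

def should_request_state_change(no_reply_count: int, accumulated_wait_seconds: float) -> bool:
    """Return True only when both the count and the accumulated wait time cross their thresholds."""
    time_threshold = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD
    return (
        no_reply_count >= CONSECUTIVE_NO_REPLY_THRESHOLD
        and accumulated_wait_seconds >= time_threshold
    )

# Three timeouts but little actual waiting -> stay in the chat loop.
print(should_request_state_change(3, 120.0))  # False
print(should_request_state_change(3, 700.0))  # True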
@@ -658,8 +708,8 @@ class HeartFChatting:
                 return True

             # 检查超时 (放在检查新消息和关闭之后)
-            if time.monotonic() - wait_start_time > 120:
-                logger.warning(f"{log_prefix} 等待新消息超时(20秒)")
+            if time.monotonic() - wait_start_time > WAITING_TIME_THRESHOLD:
+                logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)")
                 return False

             try:
@@ -737,9 +787,53 @@ class HeartFChatting:

         参数:
             current_mind: 子思维的当前思考结果
+            cycle_timers: 计时器字典
+            is_re_planned: 是否为重新规划
         """
         logger.info(f"{self.log_prefix}[Planner] 开始{'重新' if is_re_planned else ''}执行规划器")

+        # --- 新增:检查历史动作并调整可用动作 ---
+        lian_xu_wen_ben_hui_fu = 0  # 连续文本回复次数
+        actions_to_remove_temporarily = []
+        probability_roll = random.random()  # 在循环外掷骰子一次,用于概率判断
+
+        # 反向遍历最近的循环历史
+        for cycle in reversed(self._cycle_history):
+            # 只关心实际执行了动作的循环
+            if cycle.action_taken:
+                if cycle.action_type == "text_reply":
+                    lian_xu_wen_ben_hui_fu += 1
+                else:
+                    break  # 遇到非文本回复,中断计数
+            # 检查最近的3个循环即可,避免检查过多历史 (如果历史很长)
+            if len(self._cycle_history) > 0 and cycle.cycle_id <= self._cycle_history[0].cycle_id + (
+                len(self._cycle_history) - 4
+            ):
+                break
+
+        logger.debug(f"{self.log_prefix}[Planner] 检测到连续文本回复次数: {lian_xu_wen_ben_hui_fu}")
+
+        # 根据连续次数决定临时移除哪些动作
+        if lian_xu_wen_ben_hui_fu >= 3:
+            logger.info(f"{self.log_prefix}[Planner] 连续回复 >= 3 次,强制移除 text_reply 和 emoji_reply")
+            actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"])
+        elif lian_xu_wen_ben_hui_fu == 2:
+            if probability_roll < 0.8:  # 80% 概率
+                logger.info(f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (触发)")
+                actions_to_remove_temporarily.extend(["text_reply", "emoji_reply"])
+            else:
+                logger.info(
+                    f"{self.log_prefix}[Planner] 连续回复 2 次,80% 概率移除 text_reply 和 emoji_reply (未触发)"
+                )
+        elif lian_xu_wen_ben_hui_fu == 1:
+            if probability_roll < 0.4:  # 40% 概率
+                logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (触发)")
+                actions_to_remove_temporarily.append("text_reply")
+            else:
+                logger.info(f"{self.log_prefix}[Planner] 连续回复 1 次,40% 概率移除 text_reply (未触发)")
+        # 如果 lian_xu_wen_ben_hui_fu == 0,则不移除任何动作
+        # --- 结束:检查历史动作 ---
+
         # 获取观察信息
         observation = self.observations[0]
         if is_re_planned:
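For reference, a standalone sketch of the anti-repetition roll this hunk adds to the planner: one random draw per planning cycle decides whether the reply actions are temporarily withheld, with the removal probability rising with the number of consecutive text replies. The probabilities mirror the diff; the helper itself is a simplified stand-in, not the project's code:

import random
from typing import Optional

def actions_to_withhold(consecutive_text_replies: int, roll: Optional[float] = None) -> list[str]:
    """Decide which planner actions to hide for this cycle, mirroring the thresholds in the diff."""
    roll = random.random() if roll is None else roll
    if consecutive_text_replies >= 3:
        return ["text_reply", "emoji_reply"]          # always withheld
    if consecutive_text_replies == 2 and roll < 0.8:  # 80% chance
        return ["text_reply", "emoji_reply"]
    if consecutive_text_replies == 1 and roll < 0.4:  # 40% chance
        return ["text_reply"]
    return []

print(actions_to_withhold(2, roll=0.5))  # ['text_reply', 'emoji_reply']
print(actions_to_withhold(1, roll=0.9))  # []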
@@ -754,6 +848,13 @@ class HeartFChatting:
         emoji_query = ""  # <--- 在这里初始化 emoji_query

         try:
+            # --- 新增:应用临时动作移除 ---
+            if actions_to_remove_temporarily:
+                self.action_manager.temporarily_remove_actions(actions_to_remove_temporarily)
+                logger.debug(
+                    f"{self.log_prefix}[Planner] 临时移除的动作: {actions_to_remove_temporarily}, 当前可用: {list(self.action_manager.get_available_actions().keys())}"
+                )
+
             # --- 构建提示词 ---
             replan_prompt_str = ""
             if is_re_planned:
@@ -767,6 +868,7 @@ class HeartFChatting:
             # --- 调用 LLM ---
             try:
                 planner_tools = self.action_manager.get_planner_tool_definition()
+                logger.debug(f"{self.log_prefix}[Planner] 本次使用的工具定义: {planner_tools}")  # 记录本次使用的工具
                 _response_text, _reasoning_content, tool_calls = await self.planner_llm.generate_response_tool_async(
                     prompt=prompt,
                     tools=planner_tools,
@@ -810,15 +912,25 @@ class HeartFChatting:
                         extracted_action = arguments.get("action", "no_reply")
                         # 验证动作
                         if extracted_action not in self.action_manager.get_available_actions():
+                            # 如果LLM返回了一个此时不该用的动作(因为被临时移除了)
+                            # 或者完全无效的动作
                             logger.warning(
-                                f"{self.log_prefix}[Planner] LLM返回了未授权的动作: {extracted_action},使用默认动作no_reply"
+                                f"{self.log_prefix}[Planner] LLM返回了当前不可用或无效的动作: {extracted_action},将强制使用 'no_reply'"
                             )
                             action = "no_reply"
-                            reasoning = f"LLM返回了未授权的动作: {extracted_action}"
+                            reasoning = f"LLM返回了当前不可用的动作: {extracted_action}"
                             emoji_query = ""
-                            llm_error = False  # 视为非LLM错误,只是逻辑修正
+                            llm_error = False  # 视为逻辑修正而非 LLM 错误
+                            # --- 检查 'no_reply' 是否也恰好被移除了 (极端情况) ---
+                            if "no_reply" not in self.action_manager.get_available_actions():
+                                logger.error(
+                                    f"{self.log_prefix}[Planner] 严重错误:'no_reply' 动作也不可用!无法执行任何动作。"
+                                )
+                                action = "error"  # 回退到错误状态
+                                reasoning = "无法执行任何有效动作,包括 no_reply"
+                                llm_error = True
                         else:
-                            # 动作有效,使用提取的值
+                            # 动作有效且可用,使用提取的值
                             action = extracted_action
                             reasoning = arguments.get("reasoning", "未提供理由")
                             emoji_query = arguments.get("emoji_query", "")
@@ -837,8 +949,20 @@ class HeartFChatting:
                         reasoning = f"验证工具调用失败: {error_msg}"
                         logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
                 else:  # not valid_tool_calls
-                    reasoning = "LLM未返回有效的工具调用"
-                    logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
+                    # 如果没有有效的工具调用,我们需要检查 'no_reply' 是否是当前唯一可用的动作
+                    available_actions = list(self.action_manager.get_available_actions().keys())
+                    if available_actions == ["no_reply"]:
+                        logger.info(
+                            f"{self.log_prefix}[Planner] LLM未返回工具调用,但当前唯一可用动作是 'no_reply',将执行 'no_reply'"
+                        )
+                        action = "no_reply"
+                        reasoning = "LLM未返回工具调用,且当前仅 'no_reply' 可用"
+                        emoji_query = ""
+                        llm_error = False  # 视为逻辑选择而非错误
+                    else:
+                        reasoning = "LLM未返回有效的工具调用"
+                        logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
+                        # llm_error 保持为 True
                 # 如果 llm_error 仍然是 True,说明在处理过程中有错误发生

             except Exception as llm_e:
@@ -847,6 +971,14 @@ class HeartFChatting:
                 action = "error"
                 reasoning = f"Planner内部处理错误: {llm_e}"
                 llm_error = True
+            # --- 新增:确保动作恢复 ---
+            finally:
+                if actions_to_remove_temporarily:  # 只有当确实移除了动作时才需要恢复
+                    self.action_manager.restore_actions()
+                    logger.debug(
+                        f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}"
+                    )
+            # --- 结束:确保动作恢复 ---
             # --- 结束 LLM 决策 --- #

         return {
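For reference, a minimal sketch of the remove-then-restore contract the new finally block guarantees: even if the LLM call raises, the temporarily hidden actions are put back. The StubActionManager below is a placeholder written for this sketch only, not the project's ActionManager:

class StubActionManager:
    """Stand-in action registry, used only to illustrate the restore guarantee."""

    def __init__(self):
        self._all = {"text_reply": {}, "emoji_reply": {}, "no_reply": {}}
        self._removed = []

    def temporarily_remove_actions(self, names):
        self._removed = [n for n in names if n in self._all]

    def restore_actions(self):
        self._removed = []

    def get_available_actions(self):
        return {k: v for k, v in self._all.items() if k not in self._removed}

manager = StubActionManager()
to_remove = ["text_reply", "emoji_reply"]
try:
    manager.temporarily_remove_actions(to_remove)
    raise RuntimeError("simulated planner failure")  # the restore must still happen
except RuntimeError:
    pass
finally:
    if to_remove:
        manager.restore_actions()

print(sorted(manager.get_available_actions()))  # ['emoji_reply', 'no_reply', 'text_reply']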
@@ -85,6 +85,7 @@ def init_prompt():
 - 遵守回复原则
 - 必须调用工具并包含action和reasoning
 - 你可以选择文字回复(text_reply),纯表情回复(emoji_reply),不回复(no_reply)
+- 并不是所有选择都可用
 - 选择text_reply或emoji_reply时必须提供emoji_query
 - 保持回复自然,符合日常聊天习惯""",
         "planner_prompt",
@@ -108,7 +109,7 @@ def init_prompt():
     Prompt(
         """
 {memory_prompt}
-{relation_prompt_all}
+{relation_prompt}
 {prompt_info}
 {schedule_prompt}
 {chat_target}
@@ -260,6 +261,9 @@ class PromptBuilder:
         relation_prompt = ""
         for person in who_chat_in_group:
             relation_prompt += await relationship_manager.build_relationship_info(person)
+        print(f"relation_prompt: {relation_prompt}")
+
+        print(f"relat11111111ion_prompt: {relation_prompt}")

         # 心情
         mood_manager = MoodManager.get_instance()
@@ -373,7 +377,6 @@ class PromptBuilder:

         prompt = await global_prompt_manager.format_prompt(
             "reasoning_prompt_main",
-            relation_prompt_all=await global_prompt_manager.get_prompt_async("relationship_prompt"),
             relation_prompt=relation_prompt,
             sender_name=sender_name,
             memory_prompt=memory_prompt,
@@ -137,34 +137,55 @@ class PersonInfoManager:
     @staticmethod
     def _extract_json_from_text(text: str) -> dict:
         """从文本中提取JSON数据的高容错方法"""
-        try:
-            # 尝试直接解析
-            return json.loads(text)
-        except json.JSONDecodeError:
-            try:
-                # 尝试找到JSON格式的部分
-                json_pattern = r"\{[^{}]*\}"
-                matches = re.findall(json_pattern, text)
-                if matches:
-                    return json.loads(matches[0])
-
-                # 如果上面都失败了,尝试提取键值对
-                nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
-                reason_pattern = r'"reason"[:\s]+"([^"]+)"'
-
-                nickname_match = re.search(nickname_pattern, text)
-                reason_match = re.search(reason_pattern, text)
-
-                if nickname_match:
-                    return {
-                        "nickname": nickname_match.group(1),
-                        "reason": reason_match.group(1) if reason_match else "未提供理由",
-                    }
-            except Exception as e:
-                logger.error(f"JSON提取失败: {str(e)}")
-
-        # 如果所有方法都失败了,返回空结果
-        return {"nickname": "", "reason": ""}
+        parsed_json = None
+        try:
+            # 尝试直接解析
+            parsed_json = json.loads(text)
+            # 如果解析结果是列表,尝试取第一个元素
+            if isinstance(parsed_json, list):
+                if parsed_json:  # 检查列表是否为空
+                    parsed_json = parsed_json[0]
+                else:  # 如果列表为空,重置为 None,走后续逻辑
+                    parsed_json = None
+            # 确保解析结果是字典
+            if isinstance(parsed_json, dict):
+                return parsed_json
+        except json.JSONDecodeError:
+            # 解析失败,继续尝试其他方法
+            pass
+        except Exception as e:
+            logger.warning(f"尝试直接解析JSON时发生意外错误: {e}")
+            pass  # 继续尝试其他方法
+
+        # 如果直接解析失败或结果不是字典
+        try:
+            # 尝试找到JSON对象格式的部分
+            json_pattern = r"\{[^{}]*\}"
+            matches = re.findall(json_pattern, text)
+            if matches:
+                parsed_obj = json.loads(matches[0])
+                if isinstance(parsed_obj, dict):  # 确保是字典
+                    return parsed_obj
+
+            # 如果上面都失败了,尝试提取键值对
+            nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
+            reason_pattern = r'"reason"[:\s]+"([^"]+)"'
+
+            nickname_match = re.search(nickname_pattern, text)
+            reason_match = re.search(reason_pattern, text)
+
+            if nickname_match:
+                return {
+                    "nickname": nickname_match.group(1),
+                    "reason": reason_match.group(1) if reason_match else "未提供理由",
+                }
+        except Exception as e:
+            logger.error(f"后备JSON提取失败: {str(e)}")
+
+        # 如果所有方法都失败了,返回默认字典
+        logger.warning(f"无法从文本中提取有效的JSON字典: {text}")
+        return {"nickname": "", "reason": ""}

     async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
         """给某个用户取名"""
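For reference, a compressed standalone restatement of the fallback chain the rewritten _extract_json_from_text implements (direct parse, list unwrapping, regex-extracted object, then key scraping). It omits the project's logger and is only meant to show the order of the fallbacks, not to replace the method:

import json
import re

def extract_json_like(text: str) -> dict:
    """Fallback chain: direct parse -> unwrap a list -> regex-extracted object -> scrape known keys."""
    try:
        parsed = json.loads(text)
        if isinstance(parsed, list) and parsed:
            parsed = parsed[0]
        if isinstance(parsed, dict):
            return parsed
    except json.JSONDecodeError:
        pass
    match = re.search(r"\{[^{}]*\}", text)
    if match:
        try:
            obj = json.loads(match.group(0))
            if isinstance(obj, dict):
                return obj
        except json.JSONDecodeError:
            pass
    nickname = re.search(r'"nickname"[:\s]+"([^"]+)"', text)
    reason = re.search(r'"reason"[:\s]+"([^"]+)"', text)
    if nickname:
        return {"nickname": nickname.group(1), "reason": reason.group(1) if reason else "未提供理由"}
    return {"nickname": "", "reason": ""}

print(extract_json_like('模型输出: [{"nickname": "小麦", "reason": "测试"}]'))
# {'nickname': '小麦', 'reason': '测试'}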
@@ -4,6 +4,7 @@ import math
 from bson.decimal128 import Decimal128
 from .person_info import person_info_manager
 import time
+import random
 # import re
 # import traceback

@@ -277,22 +278,44 @@ class RelationshipManager:

         return chat_stream.user_info.user_nickname, value, relationship_level[level_num]

-    async def build_relationship_info(self, person) -> str:
-        person_id = person_info_manager.get_person_id(person[0], person[1])
+    async def build_relationship_info(self, person, is_id: bool = False) -> str:
+        if is_id:
+            person_id = person
+        else:
+            print(f"person: {person}")
+            person_id = person_info_manager.get_person_id(person[0], person[1])
         person_name = await person_info_manager.get_value(person_id, "person_name")
+        print(f"person_name: {person_name}")
         relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
         level_num = self.calculate_level_num(relationship_value)
-        relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
-        relation_prompt2_list = [
-            "忽视的回应",
-            "冷淡回复",
-            "保持理性",
-            "愿意回复",
-            "积极回复",
-            "友善和包容的回复",
-        ]

-        return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n"
+        if level_num == 0 or level_num == 5:
+            relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
+            relation_prompt2_list = [
+                "忽视的回应",
+                "冷淡回复",
+                "保持理性",
+                "愿意回复",
+                "积极回复",
+                "友善和包容的回复",
+            ]
+            return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n"
+        elif level_num == 2:
+            return ""
+        else:
+            if random.random() < 0.6:
+                relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
+                relation_prompt2_list = [
+                    "忽视的回应",
+                    "冷淡回复",
+                    "保持理性",
+                    "愿意回复",
+                    "积极回复",
+                    "友善和包容的回复",
+                ]
+                return f"你{relationship_level[level_num]}{person_name},打算{relation_prompt2_list[level_num]}。\n"
+            else:
+                return ""

     @staticmethod
     def calculate_level_num(relationship_value) -> int:
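For reference, a standalone sketch of the inclusion rule build_relationship_info now applies: the extreme levels always yield a prompt line, the neutral "认识" level never does, and the remaining levels are included roughly 60% of the time. The helper below is a simplification written for this sketch only:

import random

RELATIONSHIP_LEVELS = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
REPLY_ATTITUDES = ["忽视的回应", "冷淡回复", "保持理性", "愿意回复", "积极回复", "友善和包容的回复"]

def relationship_line(person_name: str, level_num: int) -> str:
    """Mirror of the new rule: extremes always included, 'acquaintance' never, the rest ~60% of the time."""
    if level_num in (0, 5):
        include = True
    elif level_num == 2:
        include = False
    else:
        include = random.random() < 0.6
    if not include:
        return ""
    return f"你{RELATIONSHIP_LEVELS[level_num]}{person_name},打算{REPLY_ATTITUDES[level_num]}。\n"

print(relationship_line("小明", 5))        # always included
print(repr(relationship_line("小明", 2)))  # ''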
@@ -213,17 +213,22 @@ async def _build_readable_messages_internal(
             original_len = len(content)
             limit = -1  # 默认不截断

-            if percentile < 0.6:  # 60% 之前的消息 (即最旧的 60%)
-                limit = 170
-            elif percentile < 0.8:  # 60% 到 80% 之前的消息 (即中间的 20%)
-                limit = 250
+            if percentile < 0.2:  # 60% 之前的消息 (即最旧的 60%)
+                limit = 50
+                replace_content = "......(记不清了)"
+            elif percentile < 0.5:  # 60% 之前的消息 (即最旧的 60%)
+                limit = 100
+                replace_content = "......(有点记不清了)"
+            elif percentile < 0.7:  # 60% 到 80% 之前的消息 (即中间的 20%)
+                limit = 200
+                replace_content = "......(内容太长了)"
             elif percentile < 1.0:  # 80% 到 100% 之前的消息 (即较新的 20%)
-                limit = 500
-            # 最新的 20% (理论上 percentile 会趋近 1,但这里不需要显式处理,因为 limit 默认为 -1)
+                limit = 300
+                replace_content = "......(太长了)"

             truncated_content = content
             if limit > 0 and original_len > limit:
-                truncated_content = f"{content[:limit]}......(内容太长)"
+                truncated_content = f"{content[:limit]}{replace_content}"

             message_details.append((timestamp, name, truncated_content))
         else:
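For reference, a standalone sketch of the percentile-to-limit mapping this hunk introduces: older messages get a tighter length cap and a vaguer replacement marker, while the newest slice keeps its full text (limit -1). The values mirror the diff; the helper is written for this sketch only:

def truncation_rule(percentile: float) -> tuple[int, str]:
    """Map a message's age percentile (0 = oldest) to a length cap and the marker appended on truncation."""
    if percentile < 0.2:
        return 50, "......(记不清了)"
    elif percentile < 0.5:
        return 100, "......(有点记不清了)"
    elif percentile < 0.7:
        return 200, "......(内容太长了)"
    elif percentile < 1.0:
        return 300, "......(太长了)"
    return -1, ""  # newest slice: keep full text

content = "x" * 400
limit, marker = truncation_rule(0.3)
if 0 < limit < len(content):
    content = content[:limit] + marker
print(limit, marker)                                 # 100 ......(有点记不清了)
print(content.endswith(marker), len(content) < 400)  # True True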
@@ -343,7 +348,10 @@ async def build_readable_messages(
         messages_before_mark, replace_bot_name, merge_messages, timestamp_mode, truncate
     )
     formatted_after, _ = await _build_readable_messages_internal(
-        messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, truncate
+        messages_after_mark,
+        replace_bot_name,
+        merge_messages,
+        timestamp_mode,
     )

     readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode)
@@ -359,3 +367,33 @@ async def build_readable_messages(
     else:
         # 理论上不应该发生,但作为保险
         return read_mark_line.strip()  # 如果前后都无消息,只返回标记行
+
+
+async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]:
+    """
+    从消息列表中提取不重复的 person_id 列表 (忽略机器人自身)。
+
+    Args:
+        messages: 消息字典列表。
+
+    Returns:
+        一个包含唯一 person_id 的列表。
+    """
+    person_ids_set = set()  # 使用集合来自动去重
+
+    for msg in messages:
+        user_info = msg.get("user_info", {})
+        platform = user_info.get("platform")
+        user_id = user_info.get("user_id")
+
+        # 检查必要信息是否存在 且 不是机器人自己
+        if not all([platform, user_id]) or user_id == global_config.BOT_QQ:
+            continue
+
+        person_id = person_info_manager.get_person_id(platform, user_id)
+
+        # 只有当获取到有效 person_id 时才添加
+        if person_id:
+            person_ids_set.add(person_id)
+
+    return list(person_ids_set)  # 将集合转换为列表返回
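For reference, a standalone sketch of what the new get_person_id_list helper does with a message list: one entry per distinct sender, with the bot's own messages skipped. The BOT_QQ constant and the "platform:user_id" id format below are placeholders for this sketch, not the project's person_id scheme:

from typing import Any, Dict, List

BOT_QQ = "10001"  # placeholder for global_config.BOT_QQ

def unique_person_ids(messages: List[Dict[str, Any]]) -> List[str]:
    """Deduplicate senders, ignoring messages sent by the bot itself."""
    seen = set()
    for msg in messages:
        user_info = msg.get("user_info", {})
        platform, user_id = user_info.get("platform"), user_info.get("user_id")
        if not all([platform, user_id]) or user_id == BOT_QQ:
            continue
        seen.add(f"{platform}:{user_id}")  # placeholder id format
    return list(seen)

msgs = [
    {"user_info": {"platform": "qq", "user_id": "42"}},
    {"user_info": {"platform": "qq", "user_id": "42"}},
    {"user_info": {"platform": "qq", "user_id": BOT_QQ}},
]
print(unique_person_ids(msgs))  # ['qq:42']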
@@ -17,7 +17,7 @@ version = "1.5.1"
 # 主版本号:当你做了不兼容的 API 修改,
 # 次版本号:当你做了向下兼容的功能性新增,
 # 修订号:当你做了向下兼容的问题修正。
-# 先行版本号及版本编译信息可以加到“主版本号.次版本号.修订号”的后面,作为延伸。
+# 先行版本号及版本编译信息可以加到"主版本号.次版本号.修订号"的后面,作为延伸。
 #----以上是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----

 [bot]
@@ -66,20 +66,20 @@ time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运
 nonebot-qq="http://127.0.0.1:18002/api/message"

 [response] #群聊的回复策略
-enable_heart_flowC = true
-# 该功能还在完善中
-# 是否启用heart_flowC(心流聊天,HFC)模式
-# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token

 #一般回复参数
 model_reasoning_probability = 0.7 # 麦麦回答时选择推理模型 模型的概率
 model_normal_probability = 0.3 # 麦麦回答时选择一般模型 模型的概率

-[heartflow] #启用启用heart_flowC(心流聊天)模式时生效,需要填写以下参数
+[heartflow]
+allow_focus_mode = true # 是否允许进入FOCUSED状态
+# 是否启用heart_flowC(心流聊天,HFC)模式
+# 启用后麦麦会自主选择进入heart_flowC模式(持续一段时间),进行主动的观察和回复,并给出回复,比较消耗token
 reply_trigger_threshold = 3.0 # 心流聊天触发阈值,越低越容易进入心流聊天
 probability_decay_factor_per_second = 0.2 # 概率衰减因子,越大衰减越快,越高越容易退出心流聊天
 default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入心流聊天



 sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒

 observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
@@ -161,7 +161,7 @@ reaction = "有人说你是人机或者机器人,否定这一事实,攻击
 [[keywords_reaction.rules]] # 就像这样复制
 enable = false # 仅作示例,不会触发
 keywords = ["测试关键词回复","test",""]
-reaction = "回答“测试成功”"
+reaction = "回答“测试成功”" # 修复错误的引号

 [[keywords_reaction.rules]] # 使用正则表达式匹配句式
 enable = false # 仅作示例,不会触发