feat: enable heartflow reply dedup check and heartflow relationship info; optimize the relationship prompt
@@ -20,6 +20,7 @@
 - **Flow optimization**: Split the sub-heartflow thinking module so the overall conversation flow is clearer.
 - **State-judgement improvement**: Hand the CHAT state decision over to the LLM, making conversations more natural.
 - **Reply mechanism**: Implement a more flexible probabilistic reply mechanism so the bot blends naturally into group chats.
+- **Repetition check**: Add a repetition check for heartflow replies to keep 麦麦 from falling into a fixed reply pattern.

 #### New Knowledge Base System (LPMM)
 - **Introducing LPMM**: Add the **LPMM (Large Psychology Model Maker)** knowledge-base system, whose strong retrieval capability significantly improves how efficiently 麦麦 acquires and uses knowledge.
@@ -32,8 +33,11 @@

 #### Memory and Context Enhancement
 - **Chat-history compression**: Substantially optimize the chat-history compression system, letting the bot handle five times the previous amount of context memory.
+- **Long-message truncation**: Add automatic truncation and blurring of long messages, lowering the weight of over-long messages over time so specific redundant information does not dominate.
 - **Memory retrieval**: Optimize memory retrieval, improving understanding and referencing of past conversations.
+- **Memory consolidation**: Add merge-and-consolidation mechanisms to the memory system, optimizing the structure and efficiency of long-term memory.
 - **Mid-term memory recall**: Improve the mid-term memory recall mechanism so the bot can recall and cite earlier conversations more naturally.
+- **Prompt optimization**: Further optimize the prompts used by the relationship and memory systems.

 #### Private Chat PFC Enhancement
 - **Fixes and optimization**: Fix the missing chat-history bug in private-chat PFC loading, optimize prompt construction, add a review mechanism, adjust the retry count, and store the bot's own messages in the database.
@@ -41,9 +45,9 @@

 #### Emotion and Interaction Enhancement
 - **New sticker system**: The new sticker system is live, with richer sticker meanings and faster sending.
+- **Sticker usage optimization**: Optimize sticker selection to reduce repeated use of particular stickers, making expression livelier.
 - **Prompt optimization**: Optimize prompt construction to improve conversation quality and emotional expression.
 - **Talkativeness configuration**: Optimize the "make 麦麦 more willing to talk" settings so the bot joins conversations more actively.
-- **Unified naming**: Implement unified naming that automatically replaces unique identifiers inside prompts, improving prompt effectiveness.
 - **Kaomoji protection**: Protect the kaomoji handling mechanism so emoticons display correctly.

 #### Tools and Integration
@@ -23,20 +23,21 @@ def ji_suan_ti_huan_gai_lv(xiang_si_du: float) -> float:
     Rules:
     - similarity <= 0.4: probability = 0
     - similarity >= 0.9: probability = 1
-    - 0.4 < similarity <= 0.6: linear interpolation from (0.4, 0) to (0.6, 0.5)
-    - 0.6 < similarity < 0.9: linear interpolation from (0.6, 0.5) to (0.9, 1.0)
+    - similarity == 0.6: probability = 0.7
+    - 0.4 < similarity <= 0.6: linear interpolation from (0.4, 0) to (0.6, 0.7)
+    - 0.6 < similarity < 0.9: linear interpolation from (0.6, 0.7) to (0.9, 1.0)
     """
     if xiang_si_du <= 0.4:
         return 0.0
     elif xiang_si_du >= 0.9:
         return 1.0
     elif 0.4 < xiang_si_du <= 0.6:
-        # p = 2.5 * s - 1.0 (line y - 0 = (0.5 - 0) / (0.6 - 0.4) * (x - 0.4))
-        gai_lv = 2.5 * xiang_si_du - 1.0
+        # p = 3.5 * s - 1.4 (line y - 0 = (0.7 - 0) / (0.6 - 0.4) * (x - 0.4))
+        gai_lv = 3.5 * xiang_si_du - 1.4
         return max(0.0, gai_lv)  # make sure the probability is not below 0
     elif 0.6 < xiang_si_du < 0.9:
-        # p = (5/3) * s - 0.5 (line y - 0.5 = (1 - 0.5) / (0.9 - 0.6) * (x - 0.6))
-        gai_lv = (5 / 3) * xiang_si_du - 0.5
+        # p = s + 0.1 (line y - 0.7 = (1 - 0.7) / (0.9 - 0.6) * (x - 0.6))
+        gai_lv = xiang_si_du + 0.1
         return min(1.0, max(0.0, gai_lv))  # keep the probability between 0 and 1


 # Get user input
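For reference, a quick sanity check of the new piecewise mapping. This is a standalone sketch: the local replacement_probability helper simply mirrors the rules in the hunk above rather than importing the script itself.

# Standalone sketch of the similarity -> replacement-probability mapping above.
def replacement_probability(similarity: float) -> float:
    if similarity <= 0.4:
        return 0.0
    if similarity >= 0.9:
        return 1.0
    if similarity <= 0.6:
        return max(0.0, 3.5 * similarity - 1.4)  # (0.4, 0) -> (0.6, 0.7)
    return min(1.0, similarity + 0.1)            # (0.6, 0.7) -> (0.9, 1.0)

assert replacement_probability(0.3) == 0.0                # below the dead zone
assert abs(replacement_probability(0.5) - 0.35) < 1e-9
assert abs(replacement_probability(0.6) - 0.7) < 1e-9     # the new anchor point
assert abs(replacement_probability(0.8) - 0.9) < 1e-9
assert replacement_probability(0.95) == 1.0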
@@ -48,9 +48,11 @@ class GetMemoryTool(BaseTool):
                 memory_info += memory[1] + "\n"

             if memory_info:
-                content = f"你记得这些事情: {memory_info}"
+                content = f"你记得这些事情: {memory_info}\n"
+                content += "以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
+
             else:
-                content = f"你不太记得有关{topic}的记忆,你对此不太了解"
+                content = f"{topic}的记忆,你记不太清"

             return {"name": "get_memory", "content": content}
         except Exception as e:
@@ -10,6 +10,7 @@ from src.plugins.utils.chat_message_builder import (
     build_readable_messages,
     get_raw_msg_by_timestamp_with_chat,
     num_new_messages_since,
+    get_person_id_list,
 )

 logger = get_logger("observation")
@@ -46,6 +47,8 @@ class ChattingObservation(Observation):
         self.max_mid_memory_len = global_config.compress_length_limit
         self.mid_memory_info = ""

+        self.person_list = []
+
         self.llm_summary = LLMRequest(
             model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )
@@ -153,6 +156,12 @@ class ChattingObservation(Observation):
             truncate=True,
         )

+        self.person_list = await get_person_id_list(self.talking_message)
+
+        # print(f"self.11111person_list: {self.person_list}")
+
+
+
         logger.trace(
             f"Chat {self.chat_id} - 压缩早期记忆:{self.mid_memory_info}\n现在聊天内容:{self.talking_message_str}"
         )
@@ -12,6 +12,9 @@ from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
 from src.heart_flow.chat_state_info import ChatStateInfo
 from src.plugins.chat.chat_stream import chat_manager
 from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
+import difflib
+from src.plugins.person_info.relationship_manager import relationship_manager
+


 logger = get_logger("sub_heartflow")
@@ -20,6 +23,7 @@ logger = get_logger("sub_heartflow")
 def init_prompt():
     prompt = ""
     prompt += "{extra_info}\n"
+    prompt += "{relation_prompt}\n"
     prompt += "你的名字是{bot_name},{prompt_personality}\n"
     prompt += "{last_loop_prompt}\n"
     prompt += "{cycle_info_block}\n"
@@ -47,6 +51,39 @@ def init_prompt():
     Prompt(prompt, "last_loop")


+def calculate_similarity(text_a: str, text_b: str) -> float:
+    """
+    Compute the similarity between two text strings.
+    """
+    if not text_a or not text_b:
+        return 0.0
+    matcher = difflib.SequenceMatcher(None, text_a, text_b)
+    return matcher.ratio()
+
+def calculate_replacement_probability(similarity: float) -> float:
+    """
+    Compute the replacement probability from the similarity.
+    Rules:
+    - similarity <= 0.4: probability = 0
+    - similarity >= 0.9: probability = 1
+    - similarity == 0.6: probability = 0.7
+    - 0.4 < similarity <= 0.6: linear interpolation from (0.4, 0) to (0.6, 0.7)
+    - 0.6 < similarity < 0.9: linear interpolation from (0.6, 0.7) to (0.9, 1.0)
+    """
+    if similarity <= 0.4:
+        return 0.0
+    elif similarity >= 0.9:
+        return 1.0
+    elif 0.4 < similarity <= 0.6:
+        # p = 3.5 * s - 1.4
+        probability = 3.5 * similarity - 1.4
+        return max(0.0, probability)
+    elif 0.6 < similarity < 0.9:
+        # p = s + 0.1
+        probability = similarity + 0.1
+        return min(1.0, max(0.0, probability))
+
+
 class SubMind:
     def __init__(self, subheartflow_id: str, chat_state: ChatStateInfo, observations: Observation):
         self.subheartflow_id = subheartflow_id
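As an illustration of how the two new helpers behave together, a minimal standalone sketch; the example strings are invented, and only difflib from the standard library is used.

import difflib

# Two consecutive "thoughts" that share a long prefix.
previous_mind = "今天天气真好,想出去走走"
new_mind = "今天天气真好,想在家休息"

matcher = difflib.SequenceMatcher(None, previous_mind, new_mind)
# ratio() = 2 * matched characters / total characters; the 8-character shared
# prefix out of 2 * 12 characters gives roughly 0.67 here, which lands in the
# 0.6-0.9 band of the mapping above, i.e. a replacement probability of roughly 0.77.
print(matcher.ratio())
# get_matching_blocks() returns the size-8 prefix match plus a zero-length terminator.
print(matcher.get_matching_blocks())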
@@ -80,7 +117,7 @@ class SubMind:

         # ---------- 1. Prepare basic data ----------
         # Get the existing thought and the mood state
-        current_thinking_info = self.current_mind
+        previous_mind = self.current_mind if self.current_mind else ""
         mood_info = self.chat_state.mood

         # Get the observation object
@@ -92,6 +129,7 @@ class SubMind:

         # Get the observation content
         chat_observe_info = observation.get_observe_info()
+        person_list = observation.person_list

         # ---------- 2. Prepare tools and personalization data ----------
         # Initialize tools
@@ -101,6 +139,14 @@ class SubMind:
         # Get personalization info
         individuality = Individuality.get_instance()

+
+        relation_prompt = ""
+        print(f"person_list: {person_list}")
+        for person in person_list:
+            relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
+
+        print(f"relat22222ion_prompt: {relation_prompt}")
+
         # Build the personality section
         prompt_personality = individuality.get_prompt(x_person=2, level=2)

@@ -136,9 +182,9 @@ class SubMind:
         last_reasoning = ""
         is_replan = False
         if_replan_prompt = ""
-        if current_thinking_info:
+        if previous_mind:
             last_loop_prompt = (await global_prompt_manager.get_prompt_async("last_loop")).format(
-                current_thinking_info=current_thinking_info, if_replan_prompt=if_replan_prompt
+                current_thinking_info=previous_mind, if_replan_prompt=if_replan_prompt
             )
         else:
             last_loop_prompt = ""
@@ -196,6 +242,7 @@ class SubMind:
         prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
             extra_info="",  # extra information can be added here
             prompt_personality=prompt_personality,
+            relation_prompt=relation_prompt,
             bot_name=individuality.name,
             time_now=time_now,
             chat_observe_info=chat_observe_info,
@@ -205,8 +252,6 @@ class SubMind:
             cycle_info_block=cycle_info_block,
         )

-        # logger.debug(f"[{self.subheartflow_id}] 心流思考提示词构建完成")
-
         # ---------- 5. Run the LLM request and handle the response ----------
         content = ""  # initialize the content variable
         _reasoning_content = ""  # initialize the reasoning-content variable
@@ -240,7 +285,7 @@ class SubMind:
                 elif not success:
                     logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}")
                 else:
-                    logger.info(f"{self.log_prefix} 心流未使用工具")  # clarify in the log that no tool was used rather than not handled
+                    logger.info(f"{self.log_prefix} 心流未使用工具")

         except Exception as e:
             # Handle the overall exception
@@ -248,15 +293,87 @@ class SubMind:
             logger.error(traceback.format_exc())
             content = "思考过程中出现错误"

-        # Log the final thinking result
-        logger.debug(f"{self.log_prefix} \nPrompt:\n{prompt}\n\n心流思考结果:\n{content}\n")
+        # Log the preliminary thinking result
+        logger.debug(f"{self.log_prefix} 初步心流思考结果: {content}\nprompt: {prompt}\n")

         # Handle an empty response
         if not content:
             content = "(不知道该想些什么...)"
             logger.warning(f"{self.log_prefix} LLM返回空结果,思考失败。")

-        # ---------- 6. Update the thinking state and return the result ----------
+        # ---------- 6. Apply probabilistic de-duplication and decoration ----------
+        new_content = content  # keep the raw LLM output
+        try:
+            similarity = calculate_similarity(previous_mind, new_content)
+            replacement_prob = calculate_replacement_probability(similarity)
+            logger.debug(f"{self.log_prefix} 新旧想法相似度: {similarity:.2f}, 替换概率: {replacement_prob:.2f}")
+
+            # Word lists (defined before the branch so both paths can use them)
+            yu_qi_ci_liebiao = ["嗯", "哦", "啊", "唉", "哈", "唔"]
+            zhuan_zhe_liebiao = ["但是", "不过", "然而", "可是", "只是"]
+            cheng_jie_liebiao = ["然后", "接着", "此外", "而且", "另外"]
+            zhuan_jie_ci_liebiao = zhuan_zhe_liebiao + cheng_jie_liebiao
+
+            if random.random() < replacement_prob:
+                # Very similar to the previous thought: de-duplicate or apply special handling
+                if similarity == 1.0:
+                    logger.debug(f"{self.log_prefix} 想法完全重复 (相似度 1.0),执行特殊处理...")
+                    # Randomly keep roughly the first half of the content
+                    if len(new_content) > 1:  # avoid truncating content that is too short
+                        split_point = max(1, len(new_content) // 2 + random.randint(-len(new_content) // 4, len(new_content) // 4))
+                        truncated_content = new_content[:split_point]
+                    else:
+                        truncated_content = new_content  # a single character (or empty string) is kept as-is
+
+                    # Prepend a filler word and a transition/continuation word
+                    yu_qi_ci = random.choice(yu_qi_ci_liebiao)
+                    zhuan_jie_ci = random.choice(zhuan_jie_ci_liebiao)
+                    content = f"{yu_qi_ci}{zhuan_jie_ci},{truncated_content}"
+                    logger.debug(f"{self.log_prefix} 想法重复,特殊处理后: {content}")
+
+                else:
+                    # High but not 100% similarity: run the standard de-duplication logic
+                    logger.debug(f"{self.log_prefix} 执行概率性去重 (概率: {replacement_prob:.2f})...")
+                    matcher = difflib.SequenceMatcher(None, previous_mind, new_content)
+                    deduplicated_parts = []
+                    last_match_end_in_b = 0
+                    for _i, j, n in matcher.get_matching_blocks():
+                        if last_match_end_in_b < j:
+                            deduplicated_parts.append(new_content[last_match_end_in_b:j])
+                        last_match_end_in_b = j + n
+
+                    deduplicated_content = "".join(deduplicated_parts).strip()
+
+                    if deduplicated_content:
+                        # Decide probabilistically whether to add lead-in words
+                        prefix_str = ""
+                        if random.random() < 0.3:  # 30% chance to add a filler word
+                            prefix_str += random.choice(yu_qi_ci_liebiao)
+                        if random.random() < 0.7:  # 70% chance to add a transition/continuation word
+                            prefix_str += random.choice(zhuan_jie_ci_liebiao)
+
+                        # Assemble the final result
+                        if prefix_str:
+                            content = f"{prefix_str},{deduplicated_content}"  # update content
+                            logger.debug(f"{self.log_prefix} 去重并添加引导词后: {content}")
+                        else:
+                            content = deduplicated_content  # update content
+                            logger.debug(f"{self.log_prefix} 去重后 (未添加引导词): {content}")
+                    else:
+                        logger.warning(f"{self.log_prefix} 去重后内容为空,保留原始LLM输出: {new_content}")
+                        content = new_content  # keep the original content
+            else:
+                logger.debug(f"{self.log_prefix} 未执行概率性去重 (概率: {replacement_prob:.2f})")
+                # content stays equal to new_content
+
+        except Exception as e:
+            logger.error(f"{self.log_prefix} 应用概率性去重或特殊处理时出错: {e}")
+            logger.error(traceback.format_exc())
+            # keep the original content on error
+            content = new_content
+
+        # ---------- 7. Update the thinking state and return the result ----------
+        logger.info(f"{self.log_prefix} 最终心流思考结果: {content}")
         # Update the current thinking content
         self.update_current_mind(content)

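To make the de-duplication path above concrete, a standalone sketch; the example strings are invented, and the loop mirrors the get_matching_blocks logic in this hunk.

import difflib

previous_mind = "我觉得今天的话题很有趣,想继续聊聊"
new_content = "我觉得今天的话题很有趣,不过有点困了"

# Keep only the parts of new_content that do not already appear in previous_mind.
matcher = difflib.SequenceMatcher(None, previous_mind, new_content)
parts, last_end = [], 0
for _i, j, n in matcher.get_matching_blocks():
    if last_end < j:
        parts.append(new_content[last_end:j])
    last_end = j + n

print("".join(parts).strip())  # -> "不过有点困了": the repeated prefix is dropped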
@@ -260,6 +260,9 @@ class PromptBuilder:
         relation_prompt = ""
         for person in who_chat_in_group:
             relation_prompt += await relationship_manager.build_relationship_info(person)
+        print(f"relation_prompt: {relation_prompt}")
+
+        print(f"relat11111111ion_prompt: {relation_prompt}")

         # Mood
         mood_manager = MoodManager.get_instance()
@@ -137,34 +137,55 @@ class PersonInfoManager:
     @staticmethod
     def _extract_json_from_text(text: str) -> dict:
         """A fault-tolerant way to extract JSON data from text"""
+        parsed_json = None
         try:
             # Try parsing the text directly
-            return json.loads(text)
+            parsed_json = json.loads(text)
+            # If the result is a list, try its first element
+            if isinstance(parsed_json, list):
+                if parsed_json:  # check whether the list is empty
+                    parsed_json = parsed_json[0]
+                else:  # empty list: reset to None and fall through to the later logic
+                    parsed_json = None
+            # Make sure the parsed result is a dict
+            if isinstance(parsed_json, dict):
+                return parsed_json
+
         except json.JSONDecodeError:
-            try:
-                # Try to find a JSON-formatted part
-                json_pattern = r"\{[^{}]*\}"
-                matches = re.findall(json_pattern, text)
-                if matches:
-                    return json.loads(matches[0])
+            # Parsing failed; move on to the other methods
+            pass
+        except Exception as e:
+            logger.warning(f"尝试直接解析JSON时发生意外错误: {e}")
+            pass  # move on to the other methods

-                # If the above failed, try extracting key-value pairs
-                nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
-                reason_pattern = r'"reason"[:\s]+"([^"]+)"'
+        # Direct parsing failed or the result was not a dict
+        try:
+            # Try to find a JSON-object-formatted part
+            json_pattern = r"\{[^{}]*\}"
+            matches = re.findall(json_pattern, text)
+            if matches:
+                parsed_obj = json.loads(matches[0])
+                if isinstance(parsed_obj, dict):  # make sure it is a dict
+                    return parsed_obj

-                nickname_match = re.search(nickname_pattern, text)
-                reason_match = re.search(reason_pattern, text)
+            # If the above failed, try extracting key-value pairs
+            nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
+            reason_pattern = r'"reason"[:\s]+"([^"]+)"'

-                if nickname_match:
-                    return {
-                        "nickname": nickname_match.group(1),
-                        "reason": reason_match.group(1) if reason_match else "未提供理由",
-                    }
-            except Exception as e:
-                logger.error(f"JSON提取失败: {str(e)}")
+            nickname_match = re.search(nickname_pattern, text)
+            reason_match = re.search(reason_pattern, text)
+
+            if nickname_match:
+                return {
+                    "nickname": nickname_match.group(1),
+                    "reason": reason_match.group(1) if reason_match else "未提供理由",
+                }
+        except Exception as e:
+            logger.error(f"后备JSON提取失败: {str(e)}")

-        # If every method failed, return an empty result
-        return {"nickname": "", "reason": ""}
+        # If every method failed, return the default dict
+        logger.warning(f"无法从文本中提取有效的JSON字典: {text}")
+        return {"nickname": "", "reason": ""}

     async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
         """Give this user a name"""
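For context, a standalone sketch of the fallback chain: direct parsing first, then grabbing the first {...} block. The sample LLM output is invented; the regex is the one used in the hunk above.

import json
import re

text = '好的,结果如下:{"nickname": "小麦", "reason": "听起来很亲切"} 希望你喜欢'

try:
    result = json.loads(text)  # fails: the text is not pure JSON
except json.JSONDecodeError:
    result = None

if not isinstance(result, dict):
    # Fall back to the first {...} block and parse just that part.
    matches = re.findall(r"\{[^{}]*\}", text)
    result = json.loads(matches[0]) if matches else {"nickname": "", "reason": ""}

print(result)  # {'nickname': '小麦', 'reason': '听起来很亲切'}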
@@ -278,12 +278,19 @@ class RelationshipManager:

         return chat_stream.user_info.user_nickname, value, relationship_level[level_num]

-    async def build_relationship_info(self, person) -> str:
-        person_id = person_info_manager.get_person_id(person[0], person[1])
+    async def build_relationship_info(self, person, is_id: bool = False) -> str:
+        if is_id:
+            person_id = person
+        else:
+            print(f"person: {person}")
+            person_id = person_info_manager.get_person_id(person[0], person[1])
         person_name = await person_info_manager.get_value(person_id, "person_name")
+        print(f"person_name: {person_name}")
         relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
         level_num = self.calculate_level_num(relationship_value)

+        print(f"person_name: {person_name}, relationship_value: {relationship_value}, level_num: {level_num}")
+
         if level_num == 0 or level_num == 5:
             relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
             relation_prompt2_list = [
@@ -298,7 +305,7 @@ class RelationshipManager:
         elif level_num == 2:
             return ""
         else:
-            if random.random() < 0.5:
+            if random.random() < 0.6:
                 relationship_level = ["厌恶", "冷漠以对", "认识", "友好对待", "喜欢", "暧昧"]
                 relation_prompt2_list = [
                     "忽视的回应",
@@ -364,3 +364,33 @@ async def build_readable_messages(
     else:
         # This should not happen in theory, but keep it as a safeguard
         return read_mark_line.strip()  # if there are no messages before or after, return only the marker line
+
+
+async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]:
+    """
+    Extract a de-duplicated list of person_ids from a message list (ignoring the bot itself).
+
+    Args:
+        messages: a list of message dicts.
+
+    Returns:
+        A list of unique person_ids.
+    """
+    person_ids_set = set()  # a set de-duplicates automatically
+
+    for msg in messages:
+        user_info = msg.get("user_info", {})
+        platform = user_info.get("platform")
+        user_id = user_info.get("user_id")
+
+        # Require the necessary info and skip the bot itself
+        if not all([platform, user_id]) or user_id == global_config.BOT_QQ:
+            continue
+
+        person_id = person_info_manager.get_person_id(platform, user_id)
+
+        # Only add valid person_ids
+        if person_id:
+            person_ids_set.add(person_id)
+
+    return list(person_ids_set)  # convert the set back to a list
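A standalone sketch of the data shape this helper expects; the message dicts, the bot id, and the platform:user_id string used as a stand-in for person_info_manager.get_person_id are all illustrative.

BOT_QQ = "10001"  # hypothetical bot account

messages = [
    {"user_info": {"platform": "qq", "user_id": "12345"}},
    {"user_info": {"platform": "qq", "user_id": "12345"}},  # duplicate sender
    {"user_info": {"platform": "qq", "user_id": BOT_QQ}},   # the bot itself, skipped
    {"user_info": {"platform": "qq"}},                      # missing user_id, skipped
]

person_ids = set()
for msg in messages:
    user_info = msg.get("user_info", {})
    platform, user_id = user_info.get("platform"), user_info.get("user_id")
    if not all([platform, user_id]) or user_id == BOT_QQ:
        continue
    person_ids.add(f"{platform}:{user_id}")  # stand-in for get_person_id(platform, user_id)

print(list(person_ids))  # ['qq:12345']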