dev: fix name issue in memory-building text

SengokuCola
2025-04-29 01:49:51 +08:00
parent f83e151d40
commit 286fef79c4
14 changed files with 494 additions and 367 deletions

View File

@@ -0,0 +1,145 @@
{
"测试时间": "2025-04-28 14:12:36",
"测试迭代次数": 10,
"不使用工具调用": {
"平均耗时": 4.596814393997192,
"最短耗时": 2.957131862640381,
"最长耗时": 10.121938705444336,
"标准差": 2.1705468730949593,
"所有耗时": [
3.18,
4.65,
10.12,
3.5,
4.46,
4.24,
3.23,
6.2,
2.96,
3.42
]
},
"不使用工具调用_详细响应": [
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?\",\n \"emoji_query\": \"友好地询问\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "decide_reply_action(\n action=\"text_reply\",\n reasoning=\"千石连续两次叫了我的名字,显然是想引起我的注意或有事要说,作为礼貌应当回应\",\n emoji_query=\"友善的回应\"\n)",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?我在呢~\",\n \"emoji_query\": \"友好的询问\",\n \"reasoning\": \"由于对方连续两次提到我的名字,显然是想与我交流,应当及时给予友好回应避免冷场\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"在呢在呢~怎么啦?\",\n \"emoji_query\": \"好奇的回应\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "decide_reply_action(\n action=\"text_reply\",\n reasoning=\"千石连续两次提到我的名字,显然需要我回应。文字回复更正式且能明确表示我在关注他的信息。\",\n emoji_query=\"友好的回应\"\n)",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"reasoning\": \"千石连续两次提到我的名字,显然是需要我的回应。作为日常交流,应该给予友善简短的答复。\",\n \"emoji_query\": \"疑惑的歪头\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"咋啦千石~\",\n \"emoji_query\": \"好奇的询问\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "decide_reply_action\n```json\n{\n \"action\": \"text_reply\",\n \"content\": \"我在呢~怎么啦?\",\n \"emoji_query\": \"友好的关心\",\n \"reasoning\": \"千石连续两次呼唤我的名字显然是有事情要找我或想引起我的注意。根据回复原则2有人提到你但未回应应该用友善的文字进行回应并附上表达关心的表情符号来延...",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"千石怎么啦~\",\n \"emoji_query\": \"好奇的探询\"\n}\n```",
"推理内容摘要": ""
},
{
"内容摘要": "```json\n{\n \"action\": \"text_reply\",\n \"content\": \"怎么啦?突然叫我两次\",\n \"emoji_query\": \"好奇的疑问\"\n}\n```",
"推理内容摘要": ""
}
],
"使用工具调用": {
"平均耗时": 8.139546775817871,
"最短耗时": 4.9980738162994385,
"最长耗时": 18.803313732147217,
"标准差": 4.008772720760647,
"所有耗时": [
5.81,
18.8,
6.06,
8.06,
10.07,
6.34,
7.9,
6.66,
5.0,
6.69
]
},
"使用工具调用_详细响应": [
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
},
{
"内容摘要": "",
"推理内容摘要": "",
"工具调用数量": 0,
"工具调用详情": []
}
],
"差异百分比": 77.07
}
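
The 差异百分比 figure is the relative slowdown of the tool-calling path over the plain path: (8.1395 - 4.5968) / 4.5968 * 100 ≈ 77.07. A minimal sketch (field names taken from the JSON above, the script itself hypothetical) of how these summaries could be reproduced with Python's statistics module:

import statistics

def summarize(durations: list[float]) -> dict:
    # Mirrors the per-mode fields in the report above.
    return {
        "平均耗时": statistics.mean(durations),
        "最短耗时": min(durations),
        "最长耗时": max(durations),
        "标准差": statistics.stdev(durations),  # sample (n-1) standard deviation
        "所有耗时": [round(d, 2) for d in durations],
    }

no_tool = [3.18, 4.65, 10.12, 3.5, 4.46, 4.24, 3.23, 6.2, 2.96, 3.42]
with_tool = [5.81, 18.8, 6.06, 8.06, 10.07, 6.34, 7.9, 6.66, 5.0, 6.69]

diff_pct = (statistics.mean(with_tool) - statistics.mean(no_tool)) / statistics.mean(no_tool) * 100
print(round(diff_pct, 2))  # ≈ 77.09 from the rounded lists; 77.07 with full precision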

View File

@@ -8,8 +8,8 @@ from src.plugins.moods.moods import MoodManager
logger = get_logger("mai_state")
# enable_unlimited_hfc_chat = True
enable_unlimited_hfc_chat = False
enable_unlimited_hfc_chat = True
# enable_unlimited_hfc_chat = False
class MaiState(enum.Enum):

View File

@@ -8,7 +8,7 @@ from src.individuality.individuality import Individuality
import random
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
from src.do_tool.tool_use import ToolUser
from src.plugins.utils.json_utils import safe_json_dumps, normalize_llm_response, process_llm_tool_calls
from src.plugins.utils.json_utils import safe_json_dumps, process_llm_tool_calls
from src.heart_flow.chat_state_info import ChatStateInfo
from src.plugins.chat.chat_stream import chat_manager
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
@@ -20,14 +20,12 @@ logger = get_logger("sub_heartflow")
def init_prompt():
prompt = ""
prompt += "{extra_info}\n"
prompt += "{prompt_personality}\n"
prompt += "你的名字是{bot_name},{prompt_personality}\n"
prompt += "{last_loop_prompt}\n"
prompt += "{cycle_info_block}\n"
prompt += "现在是{time_now}你正在上网和qq群里的网友们聊天以下是正在进行的聊天内容\n{chat_observe_info}\n"
prompt += "\n你现在{mood_info}\n"
prompt += (
"请仔细阅读当前群聊内容,分析讨论话题和群成员关系,分析你刚刚发言和别人对你的发言的反应,思考你要不要回复。"
)
prompt += "请仔细阅读当前群聊内容,分析讨论话题和群成员关系,分析你刚刚发言和别人对你的发言的反应,思考你要不要回复。然后思考你是否需要使用函数工具。"
prompt += "思考并输出你的内心想法\n"
prompt += "输出要求:\n"
prompt += "1. 根据聊天内容生成你的想法,{hf_do_next}\n"
@@ -80,8 +78,6 @@ class SubMind:
# 更新活跃时间
self.last_active_time = time.time()
# ---------- 1. 准备基础数据 ----------
# 获取现有想法和情绪状态
current_thinking_info = self.current_mind
@@ -106,18 +102,7 @@ class SubMind:
individuality = Individuality.get_instance()
# 构建个性部分
prompt_personality = f"你正在扮演名为{individuality.personality.bot_nickname}的人类,你"
prompt_personality += individuality.personality.personality_core
# 随机添加个性侧面
if individuality.personality.personality_sides:
random_side = random.choice(individuality.personality.personality_sides)
prompt_personality += f"{random_side}"
# 随机添加身份细节
if individuality.identity.identity_detail:
random_detail = random.choice(individuality.identity.identity_detail)
prompt_personality += f"{random_detail}"
prompt_personality = individuality.get_prompt(x_person=2, level=2)
# 获取当前时间
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
@@ -211,7 +196,7 @@ class SubMind:
prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
extra_info="", # 可以在这里添加额外信息
prompt_personality=prompt_personality,
bot_name=individuality.personality.bot_nickname,
bot_name=individuality.name,
time_now=time_now,
chat_observe_info=chat_observe_info,
mood_info=mood_info,
@@ -228,7 +213,9 @@ class SubMind:
try:
# 调用LLM生成响应
response, _reasoning_content, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
response, _reasoning_content, tool_calls = await self.llm_model.generate_response_tool_async(
prompt=prompt, tools=tools
)
logger.debug(f"{self.log_prefix} 子心流输出的原始LLM响应: {response}")
@@ -238,7 +225,7 @@ class SubMind:
if tool_calls:
# 直接将 tool_calls 传递给处理函数
success, valid_tool_calls, error_msg = process_llm_tool_calls(
tool_calls, log_prefix=f"{self.log_prefix} "
tool_calls, log_prefix=f"{self.log_prefix} "
)
if success and valid_tool_calls:
@@ -246,16 +233,14 @@ class SubMind:
tool_calls_str = ", ".join(
[call.get("function", {}).get("name", "未知工具") for call in valid_tool_calls]
)
logger.info(
f"{self.log_prefix} 模型请求调用{len(valid_tool_calls)}个工具: {tool_calls_str}"
)
logger.info(f"{self.log_prefix} 模型请求调用{len(valid_tool_calls)}个工具: {tool_calls_str}")
# 收集工具执行结果
await self._execute_tool_calls(valid_tool_calls, tool_instance)
elif not success:
logger.warning(f"{self.log_prefix} 处理工具调用时出错: {error_msg}")
else:
logger.info(f"{self.log_prefix} 心流未使用工具") # 修改日志信息,明确是未使用工具而不是未处理
logger.info(f"{self.log_prefix} 心流未使用工具") # 修改日志信息,明确是未使用工具而不是未处理
except Exception as e:
# 处理总体异常

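
Condensing the hunk above: each SubMind cycle is a single tool-enabled LLM call, followed by structural validation and dispatch. A minimal sketch of that loop, where llm_model, tools and execute_tool_calls are hypothetical stand-ins for the real SubMind members:

from src.common.logger_manager import get_logger
from src.plugins.utils.json_utils import process_llm_tool_calls

logger = get_logger("sub_mind_sketch")

async def think_once(llm_model, tools, execute_tool_calls, prompt: str, log_prefix: str = "") -> str:
    # One tool-enabled LLM call: returns text, reasoning and optional tool calls.
    response, _reasoning, tool_calls = await llm_model.generate_response_tool_async(
        prompt=prompt, tools=tools
    )
    if tool_calls:
        # Validate the structure before executing anything.
        success, valid_calls, error_msg = process_llm_tool_calls(tool_calls, log_prefix=log_prefix)
        if success and valid_calls:
            await execute_tool_calls(valid_calls)
        elif not success:
            logger.warning(f"{log_prefix}处理工具调用时出错: {error_msg}")
    else:
        logger.info(f"{log_prefix}心流未使用工具")
    return response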
View File

@@ -1,6 +1,5 @@
from dataclasses import dataclass
from typing import List
import random
@dataclass
@@ -86,27 +85,6 @@ class Identity:
instance.appearance = appearance
return instance
def get_prompt(self, x_person, level):
"""
获取身份特征的prompt
"""
if x_person == 2:
prompt_identity = ""
elif x_person == 1:
prompt_identity = ""
else:
prompt_identity = ""
if level == 1:
identity_detail = self.identity_detail
random.shuffle(identity_detail)
prompt_identity += identity_detail[0]
elif level == 2:
for detail in self.identity_detail:
prompt_identity += f",{detail}"
prompt_identity += ""
return prompt_identity
def to_dict(self) -> dict:
"""将身份特征转换为字典格式"""
return {

View File

@@ -1,6 +1,7 @@
from typing import Optional
from .personality import Personality
from .identity import Identity
import random
class Individuality:
@@ -8,15 +9,16 @@ class Individuality:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self):
if Individuality._instance is not None:
raise RuntimeError("Individuality 类是单例,请使用 get_instance() 方法获取实例。")
# 正常初始化实例属性
self.personality: Optional[Personality] = None
self.identity: Optional[Identity] = None
self.name = ""
@classmethod
def get_instance(cls) -> "Individuality":
"""获取Individuality单例实例
@@ -25,7 +27,13 @@ class Individuality:
Individuality: 单例实例
"""
if cls._instance is None:
cls._instance = cls()
# 实例不存在,调用 cls() 创建新实例
# cls() 会调用 __init__
# 因为此时 cls._instance 仍然是 None,__init__ 会正常执行初始化
new_instance = cls()
# 将新创建的实例赋值给类变量 _instance
cls._instance = new_instance
# 返回(新创建的或已存在的)单例实例
return cls._instance
def initialize(
@@ -63,6 +71,8 @@ class Individuality:
identity_detail=identity_detail, height=height, weight=weight, age=age, gender=gender, appearance=appearance
)
self.name = bot_nickname
def to_dict(self) -> dict:
"""将个体特征转换为字典格式"""
return {
@@ -80,16 +90,148 @@ class Individuality:
instance.identity = Identity.from_dict(data["identity"])
return instance
def get_prompt(self, type, x_person, level):
def get_personality_prompt(self, level: int, x_person: int = 2) -> str:
"""
获取个体特征的prompt
获取人格特征的prompt
Args:
level (int): 详细程度 (1: 核心, 2: 核心+随机侧面, 3: 核心+所有侧面)
x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
Returns:
str: 生成的人格prompt字符串
"""
if type == "personality":
return self.personality.get_prompt(x_person, level)
elif type == "identity":
return self.identity.get_prompt(x_person, level)
if x_person not in [0, 1, 2]:
return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
if not self.personality:
return "人格特征尚未初始化。"
if x_person == 2:
p_pronoun = "你"
prompt_personality = f"{p_pronoun}{self.personality.personality_core}"
elif x_person == 1:
p_pronoun = "我"
prompt_personality = f"{p_pronoun}{self.personality.personality_core}"
else: # x_person == 0
p_pronoun = "" # 无人称
# 对于无人称,直接描述核心特征
prompt_personality = f"{self.personality.personality_core}"
# 根据level添加人格侧面
if level >= 2 and self.personality.personality_sides:
personality_sides = list(self.personality.personality_sides)
random.shuffle(personality_sides)
if level == 2:
prompt_personality += f",有时也会{personality_sides[0]}"
elif level == 3:
sides_str = "".join(personality_sides)
prompt_personality += f",有时也会{sides_str}"
prompt_personality += "。"
return prompt_personality
def get_identity_prompt(self, level: int, x_person: int = 2) -> str:
"""
获取身份特征的prompt
Args:
level (int): 详细程度 (1: 随机细节, 2: 所有细节+外貌年龄性别, 3: 同2)
x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
Returns:
str: 生成的身份prompt字符串
"""
if x_person not in [0, 1, 2]:
return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
if not self.identity:
return "身份特征尚未初始化。"
if x_person == 2:
i_pronoun = "你"
elif x_person == 1:
i_pronoun = "我"
else: # x_person == 0
i_pronoun = "" # 无人称
identity_parts = []
# 根据level添加身份细节
if level >= 1 and self.identity.identity_detail:
identity_detail = list(self.identity.identity_detail)
random.shuffle(identity_detail)
if level == 1:
identity_parts.append(f"身份是{identity_detail[0]}")
elif level >= 2:
details_str = "".join(identity_detail)
identity_parts.append(f"身份是{details_str}")
# 根据level添加其他身份信息
if level >= 3:
if self.identity.appearance:
identity_parts.append(f"{self.identity.appearance}")
if self.identity.age > 0:
identity_parts.append(f"年龄大约{self.identity.age}")
if self.identity.gender:
identity_parts.append(f"性别是{self.identity.gender}")
if identity_parts:
details_str = "".join(identity_parts)
if x_person in [1, 2]:
return f"{i_pronoun}{details_str}"
else: # x_person == 0
# 无人称时,直接返回细节,不加代词和开头的逗号
return f"{details_str}"
else:
return ""
if x_person in [1, 2]:
return f"{i_pronoun}的身份信息不完整。"
else: # x_person == 0
return "身份信息不完整。"
def get_prompt(self, level: int, x_person: int = 2) -> str:
"""
获取合并的个体特征prompt
Args:
level (int): 详细程度 (1: 核心/随机细节, 2: 核心+侧面/细节+其他, 3: 全部)
x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
Returns:
str: 生成的合并prompt字符串
"""
if x_person not in [0, 1, 2]:
return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
if not self.personality or not self.identity:
return "个体特征尚未完全初始化。"
# 调用新的独立方法
prompt_personality = self.get_personality_prompt(level, x_person)
prompt_identity = self.get_identity_prompt(level, x_person)
# 移除可能存在的错误信息,只合并有效的 prompt
valid_prompts = []
if "尚未初始化" not in prompt_personality and "无效的人称" not in prompt_personality:
valid_prompts.append(prompt_personality)
if (
"尚未初始化" not in prompt_identity
and "无效的人称" not in prompt_identity
and "信息不完整" not in prompt_identity
):
# 从身份 prompt 中移除代词和句号,以便更好地合并
identity_content = prompt_identity
if x_person == 2 and identity_content.startswith("你,"):
identity_content = identity_content[2:]
elif x_person == 1 and identity_content.startswith("我,"):
identity_content = identity_content[2:]
# 对于 x_person == 0身份提示不带前缀无需移除
if identity_content.endswith("。"):
identity_content = identity_content[:-1]
valid_prompts.append(identity_content)
# --- 合并 Prompt ---
final_prompt = " ".join(valid_prompts)
return final_prompt.strip()
def get_traits(self, factor):
"""

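
Hypothetical usage of the new prompt API (sample outputs paraphrase the logic above; assumes the singleton was initialized at startup):

individuality = Individuality.get_instance()

# level=2, second person: core personality plus one random side,
# roughly "你<personality_core>,有时也会<某个侧面>。"
print(individuality.get_personality_prompt(level=2, x_person=2))

# level=1, no pronoun: a single random identity detail, e.g. "身份是<细节>"
print(individuality.get_identity_prompt(level=1, x_person=0))

# Merged form, as called from sub_mind and prompt_builder in this commit:
print(individuality.get_prompt(x_person=2, level=2))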
View File

@@ -2,7 +2,6 @@ from dataclasses import dataclass
from typing import Dict, List
import json
from pathlib import Path
import random
@dataclass
@@ -119,28 +118,3 @@ class Personality:
for key, value in data.items():
setattr(instance, key, value)
return instance
def get_prompt(self, x_person, level):
# 开始构建prompt
if x_person == 2:
prompt_personality = ""
elif x_person == 1:
prompt_personality = ""
else:
prompt_personality = ""
# person
prompt_personality += self.personality_core
if level == 2:
personality_sides = self.personality_sides
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
elif level == 3:
personality_sides = self.personality_sides
for side in personality_sides:
prompt_personality += f",{side}"
prompt_personality += ""
return prompt_personality

View File

@@ -1,7 +1,6 @@
import asyncio
import time
import traceback
import random # <-- 添加导入
from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
from collections import deque
from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending
@@ -14,17 +13,20 @@ from src.plugins.models.utils_model import LLMRequest
from src.config.config import global_config
from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
from src.plugins.utils.timer_calculator import Timer # <--- Import Timer
from src.plugins.heartFC_chat.heartFC_generator import HeartFCGenerator
from src.do_tool.tool_use import ToolUser
from src.plugins.emoji_system.emoji_manager import emoji_manager
from src.plugins.utils.json_utils import process_llm_tool_calls, extract_tool_call_arguments
from src.heart_flow.sub_mind import SubMind
from src.heart_flow.observation import Observation
from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager
from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager, prompt_builder
import contextlib
from src.plugins.utils.chat_message_builder import num_new_messages_since
from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
from .heartFC_sender import HeartFCSender
from src.plugins.chat.utils import process_llm_response
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.moods.moods import MoodManager
from src.individuality.individuality import Individuality
INITIAL_DURATION = 60.0
@@ -181,12 +183,18 @@ class HeartFChatting:
self.action_manager = ActionManager()
# 初始化状态控制
self._initialized = False # 是否已初始化标志
self._processing_lock = asyncio.Lock() # 处理锁(确保单次Plan-Replier-Sender周期)
self._initialized = False
self._processing_lock = asyncio.Lock()
# 依赖注入存储
self.gpt_instance = HeartFCGenerator() # 文本回复生成器
self.tool_user = ToolUser() # 工具使用实例
# --- 移除 gpt_instance, 直接初始化 LLM 模型 ---
# self.gpt_instance = HeartFCGenerator() # <-- 移除
self.model_normal = LLMRequest( # <-- 新增 LLM 初始化
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_heartflow",
)
self.tool_user = ToolUser()
self.heart_fc_sender = HeartFCSender()
# LLM规划器配置
@@ -401,7 +409,6 @@ class HeartFChatting:
with Timer("决策", cycle_timers):
planner_result = await self._planner(current_mind, cycle_timers)
# 效果不太好还没处理replan导致观察时间点改变的问题
# action = planner_result.get("action", "error")
@@ -742,8 +749,8 @@ class HeartFChatting:
# --- 使用 LLM 进行决策 --- #
reasoning = "默认决策或获取决策失败"
llm_error = False # LLM错误标志
arguments = None # 初始化参数变量
emoji_query = "" # <--- 在这里初始化 emoji_query
arguments = None # 初始化参数变量
emoji_query = "" # <--- 在这里初始化 emoji_query
try:
# --- 构建提示词 ---
@@ -808,13 +815,13 @@ class HeartFChatting:
action = "no_reply"
reasoning = f"LLM返回了未授权的动作: {extracted_action}"
emoji_query = ""
llm_error = False # 视为非LLM错误只是逻辑修正
llm_error = False # 视为非LLM错误只是逻辑修正
else:
# 动作有效,使用提取的值
action = extracted_action
reasoning = arguments.get("reasoning", "未提供理由")
emoji_query = arguments.get("emoji_query", "")
llm_error = False # 成功处理
llm_error = False # 成功处理
# 记录决策结果
logger.debug(
f"{self.log_prefix}[要做什么]\nPrompt:\n{prompt}\n\n决策结果: {action}, 理由: {reasoning}, 表情查询: '{emoji_query}'"
@@ -822,13 +829,13 @@ class HeartFChatting:
elif tool_name != expected_tool_name:
reasoning = f"LLM返回了非预期的工具: {tool_name}"
logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
else: # arguments is None
else: # arguments is None
reasoning = f"无法提取工具 {tool_name} 的参数"
logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
elif not success:
reasoning = f"验证工具调用失败: {error_msg}"
logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
else: # not valid_tool_calls
else: # not valid_tool_calls
reasoning = "LLM未返回有效的工具调用"
logger.warning(f"{self.log_prefix}[Planner] {reasoning}")
# 如果 llm_error 仍然是 True说明在处理过程中有错误发生
@@ -1058,9 +1065,13 @@ class HeartFChatting:
# 如果最近的活动循环不是文本回复,或者没有活动循环
cycle_info_block = "\n【近期回复历史】\n(最近没有连续文本回复)\n"
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(x_person=2, level=2)
# 获取提示词模板并填充数据
prompt = (await global_prompt_manager.get_prompt_async("planner_prompt")).format(
bot_name=global_config.BOT_NICKNAME,
prompt_personality=prompt_personality,
structured_info_block=structured_info_block,
chat_content_block=chat_content_block,
current_mind_block=current_mind_block,
@@ -1083,27 +1094,66 @@ class HeartFChatting:
thinking_id: str,
) -> Optional[List[str]]:
"""
回复器 (Replier): 核心逻辑用于生成回复。
回复器 (Replier): 核心逻辑,负责生成回复文本
(已整合原 HeartFCGenerator 的功能)
"""
response_set: Optional[List[str]] = None
try:
response_set = await self.gpt_instance.generate_response(
structured_info=self.sub_mind.structured_info,
current_mind_info=self.sub_mind.current_mind,
reason=reason,
message=anchor_message, # Pass anchor_message positionally (matches 'message' parameter)
thinking_id=thinking_id, # Pass thinking_id positionally
)
# 1. 获取情绪影响因子并调整模型温度
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
current_temp = global_config.llm_normal["temp"] * arousal_multiplier
self.model_normal.temperature = current_temp # 动态调整温度
if not response_set:
logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM生成了一个空回复集。")
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
# 3. 构建 Prompt
with Timer("构建Prompt", {}): # 内部计时器,可选保留
prompt = await prompt_builder.build_prompt(
build_mode="focus",
reason=reason,
current_mind_info=self.sub_mind.current_mind,
structured_info=self.sub_mind.structured_info,
message_txt="", # 似乎是固定的空字符串
sender_name="", # 似乎是固定的空字符串
chat_stream=anchor_message.chat_stream,
)
# 4. 调用 LLM 生成回复
content = None
reasoning_content = None
model_name = "unknown_model"
try:
with Timer("LLM生成", {}): # 内部计时器,可选保留
content, reasoning_content, model_name = await self.model_normal.generate_response(prompt)
logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\\nPrompt:\\n{prompt}\\n生成回复: {content}\\n")
# 捕捉 LLM 输出信息
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
)
except Exception as llm_e:
# 精简报错信息
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}")
return None # LLM 调用失败则无法生成回复
# 5. 处理 LLM 响应
if not content:
logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。")
return None
return response_set
with Timer("处理响应", {}): # 内部计时器,可选保留
processed_response = process_llm_response(content)
if not processed_response:
logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。")
return None
return processed_response
except Exception as e:
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Unexpected error in replier_work: {e}")
logger.error(traceback.format_exc())
# 更通用的错误处理,精简信息
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}")
# logger.error(traceback.format_exc()) # 可以取消注释这行以在调试时查看完整堆栈
return None
# --- Methods moved from HeartFCController start ---

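
The inlined replier now scales sampling temperature by the mood arousal multiplier before every generation. A sketch of that adjustment; the clamp is an illustrative safeguard, not part of the original code:

def mood_adjusted_temperature(base_temp: float, arousal_multiplier: float) -> float:
    # Higher arousal -> higher temperature -> more varied replies.
    temp = base_temp * arousal_multiplier
    return max(0.1, min(temp, 1.5))  # hypothetical bounds

# Usage, mirroring the diff:
# arousal = MoodManager.get_instance().get_arousal_multiplier()
# self.model_normal.temperature = mood_adjusted_temperature(global_config.llm_normal["temp"], arousal)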
View File

@@ -1,107 +0,0 @@
from typing import List, Optional
from ..models.utils_model import LLMRequest
from ...config.config import global_config
from ..chat.message import MessageRecv
from .heartflow_prompt_builder import prompt_builder
from ..chat.utils import process_llm_response
from src.common.logger_manager import get_logger
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ..utils.timer_calculator import Timer
from src.plugins.moods.moods import MoodManager
logger = get_logger("llm")
class HeartFCGenerator:
def __init__(self):
self.model_normal = LLMRequest(
model=global_config.llm_normal,
temperature=global_config.llm_normal["temp"],
max_tokens=256,
request_type="response_heartflow",
)
self.model_sum = LLMRequest(
model=global_config.llm_summary_by_topic, temperature=0.6, max_tokens=2000, request_type="relation"
)
self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model"
async def generate_response(
self,
structured_info: str,
current_mind_info: str,
reason: str,
message: MessageRecv,
thinking_id: str,
) -> Optional[List[str]]:
"""根据当前模型类型选择对应的生成函数"""
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
current_model = self.model_normal
current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier # 激活度越高,温度越高
model_response = await self._generate_response_with_model(
structured_info, current_mind_info, reason, message, current_model, thinking_id
)
if model_response:
model_processed_response = await self._process_response(model_response)
return model_processed_response
else:
logger.info(f"{self.current_model_type}思考,失败")
return None
async def _generate_response_with_model(
self,
structured_info: str,
current_mind_info: str,
reason: str,
message: MessageRecv,
model: LLMRequest,
thinking_id: str,
) -> str:
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
with Timer() as _build_prompt:
prompt = await prompt_builder.build_prompt(
build_mode="focus",
reason=reason,
current_mind_info=current_mind_info,
structured_info=structured_info,
message_txt="",
sender_name="",
chat_stream=message.chat_stream,
)
# logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
logger.info(f"\nprompt:{prompt}\n生成回复{content}\n")
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
)
except Exception:
logger.exception("生成回复时出错")
return None
return content
async def _process_response(self, content: str) -> List[str]:
"""处理响应内容,返回处理后的内容和情感标签"""
if not content:
return None
processed_response = process_llm_response(content)
# print(f"得到了处理后的llm返回{processed_response}")
return processed_response

View File

@@ -25,7 +25,7 @@ def init_prompt():
{chat_target}
{chat_talking_prompt}
现在你想要在群里发言或者回复。\n
你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality} {prompt_identity}"
你需要扮演一位网名叫{bot_name}的人进行回复,这个人的特点是:"{prompt_personality}"
你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,你可以参考贴吧,知乎或者微博的回复风格。
看到以上聊天记录,你刚刚在想:
@@ -49,7 +49,7 @@ def init_prompt():
# Planner提示词 - 优化版
Prompt(
"""你的名字是{bot_name}一名大二女生正在QQ群聊中。需要基于以下信息决定如何参与对话:
"""你的名字是{bot_name},{prompt_personality},你现在正在一个群聊中。需要基于以下信息决定如何参与对话:
{structured_info_block}
{chat_content_block}
你的内心想法:
@@ -164,9 +164,7 @@ class PromptBuilder:
async def _build_prompt_focus(self, reason, current_mind_info, structured_info, chat_stream) -> tuple[str, str]:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
prompt_identity = individuality.get_prompt(type="identity", x_person=2, level=1)
prompt_personality = individuality.get_prompt(x_person=0, level=2)
# 日程构建
# schedule_prompt = f'''你现在正在做的事情是:{bot_schedule.get_current_num_task(num = 1,time_info = False)}'''
@@ -214,7 +212,6 @@ class PromptBuilder:
chat_talking_prompt=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME,
prompt_personality=prompt_personality,
prompt_identity=prompt_identity,
chat_target_2=await global_prompt_manager.get_prompt_async("chat_target_group2")
if chat_in_group
else await global_prompt_manager.get_prompt_async("chat_target_private2"),
@@ -230,21 +227,8 @@ class PromptBuilder:
return prompt
async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> tuple[str, str]:
# 开始构建prompt
prompt_personality = ""
# person
individuality = Individuality.get_instance()
personality_core = individuality.personality.personality_core
prompt_personality += personality_core
personality_sides = individuality.personality.personality_sides
random.shuffle(personality_sides)
prompt_personality += f",{personality_sides[0]}"
identity_detail = individuality.identity.identity_detail
random.shuffle(identity_detail)
prompt_personality += f",{identity_detail[0]}"
prompt_personality = individuality.get_prompt(x_person=2, level=2)
# 关系
who_chat_in_group = [

View File

@@ -14,51 +14,14 @@ from ...common.database import db
from ...plugins.models.utils_model import LLMRequest
from src.common.logger_manager import get_logger
from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler # 分布生成器
from ..utils.chat_message_builder import (
get_raw_msg_by_timestamp,
build_readable_messages,
) # 导入 build_readable_messages
from ..chat.utils import translate_timestamp_to_human_readable
from .memory_config import MemoryConfig
def get_closest_chat_from_db(length: int, timestamp: str):
# print(f"获取最接近指定时间戳的聊天记录,长度: {length}, 时间戳: {timestamp}")
# print(f"当前时间: {timestamp},转换后时间: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(timestamp))}")
chat_records = []
closest_record = db.messages.find_one({"time": {"$lte": timestamp}}, sort=[("time", -1)])
# print(f"最接近的记录: {closest_record}")
if closest_record:
closest_time = closest_record["time"]
chat_id = closest_record["chat_id"] # 获取chat_id
# 获取该时间戳之后的length条消息保持相同的chat_id
chat_records = list(
db.messages.find(
{
"time": {"$gt": closest_time},
"chat_id": chat_id, # 添加chat_id过滤
}
)
.sort("time", 1)
.limit(length)
)
# print(f"获取到的记录: {chat_records}")
length = len(chat_records)
# print(f"获取到的记录长度: {length}")
# 转换记录格式
formatted_records = []
for record in chat_records:
# 兼容行为,前向兼容老数据
formatted_records.append(
{
"_id": record["_id"],
"time": record["time"],
"chat_id": record["chat_id"],
"detailed_plain_text": record.get("detailed_plain_text", ""), # 添加文本内容
"memorized_times": record.get("memorized_times", 0), # 添加记忆次数
}
)
return formatted_records
return []
def calculate_information_content(text):
"""计算文本的信息量(熵)"""
char_count = Counter(text)
@@ -263,16 +226,17 @@ class Hippocampus:
@staticmethod
def find_topic_llm(text, topic_num):
prompt = (
f"这是一段文字:{text}请你从这段话中总结出最多{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
f"这是一段文字:\n{text}\n\n请你从这段话中总结出最多{topic_num}个关键的概念,可以是名词,动词,或者特定人物,帮我列出来,"
f"将主题用逗号隔开,并加上<>,例如<主题1>,<主题2>......尽可能精简。只需要列举最多{topic_num}个话题就好,不要有序号,不要告诉我其他内容。"
f"如果确定找不出主题或者没有明显主题,返回<none>。"
)
return prompt
@staticmethod
def topic_what(text, topic, time_info):
def topic_what(text, topic):
# 不再需要 time_info 参数
prompt = (
f'这是一段文字{time_info}{text}我想让你基于这段文字来概括"{topic}"这个概念,帮我总结成一句自然的话,'
f'这是一段文字:\n{text}\n\n我想让你基于这段文字来概括"{topic}"这个概念,帮我总结成一句自然的话,'
f"可以包含时间和人物,以及具体的观点。只输出这句话就好"
)
return prompt
@@ -845,9 +809,12 @@ class EntorhinalCortex:
)
timestamps = sample_scheduler.get_timestamp_array()
logger.info(f"回忆往事: {[time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts)) for ts in timestamps]}")
# 使用 translate_timestamp_to_human_readable 并指定 mode="normal"
readable_timestamps = [translate_timestamp_to_human_readable(ts, mode="normal") for ts in timestamps]
logger.info(f"回忆往事: {readable_timestamps}")
chat_samples = []
for timestamp in timestamps:
# 调用修改后的 random_get_msg_snippet
messages = self.random_get_msg_snippet(
timestamp, self.config.build_memory_sample_length, max_memorized_time_per_msg
)
@@ -862,22 +829,45 @@ class EntorhinalCortex:
@staticmethod
def random_get_msg_snippet(target_timestamp: float, chat_size: int, max_memorized_time_per_msg: int) -> list:
"""从数据库中随机获取指定时间戳附近的消息片段"""
"""从数据库中随机获取指定时间戳附近的消息片段 (使用 chat_message_builder)"""
try_count = 0
time_window_seconds = random.randint(300, 1800) # 随机时间窗口5到30分钟
while try_count < 3:
messages = get_closest_chat_from_db(length=chat_size, timestamp=target_timestamp)
# 定义时间范围:从目标时间戳开始,向后推移 time_window_seconds
timestamp_start = target_timestamp
timestamp_end = target_timestamp + time_window_seconds
# 使用 chat_message_builder 的函数获取消息
# limit_mode='earliest' 获取这个时间窗口内最早的 chat_size 条消息
messages = get_raw_msg_by_timestamp(
timestamp_start=timestamp_start, timestamp_end=timestamp_end, limit=chat_size, limit_mode="earliest"
)
if messages:
# 检查获取到的所有消息是否都未达到最大记忆次数
all_valid = True
for message in messages:
if message["memorized_times"] >= max_memorized_time_per_msg:
messages = None
if message.get("memorized_times", 0) >= max_memorized_time_per_msg:
all_valid = False
break
if messages:
# 如果所有消息都有效
if all_valid:
# 更新数据库中的记忆次数
for message in messages:
# 确保在更新前获取最新的 memorized_times以防万一
current_memorized_times = message.get("memorized_times", 0)
db.messages.update_one(
{"_id": message["_id"]}, {"$set": {"memorized_times": message["memorized_times"] + 1}}
{"_id": message["_id"]}, {"$set": {"memorized_times": current_memorized_times + 1}}
)
return messages
return messages # 直接返回原始的消息列表
# 如果获取失败或消息无效,增加尝试次数
try_count += 1
target_timestamp -= 120 # 如果第一次尝试失败,稍微向前调整时间戳再试
# 三次尝试都失败,返回 None
return None
async def sync_memory_to_db(self):
@@ -1113,86 +1103,70 @@ class ParahippocampalGyrus:
"""压缩和总结消息内容,生成记忆主题和摘要。
Args:
messages (list): 消息列表,每个消息是一个字典,包含以下字段:
- time: float, 消息的时间戳
- detailed_plain_text: str, 消息的详细文本内容
messages (list): 消息列表,每个消息是一个字典,包含数据库消息结构。
compress_rate (float, optional): 压缩率用于控制生成的主题数量。默认为0.1。
Returns:
tuple: (compressed_memory, similar_topics_dict)
- compressed_memory: set, 压缩后的记忆集合,每个元素是一个元组 (topic, summary)
- topic: str, 记忆主题
- summary: str, 主题的摘要描述
- similar_topics_dict: dict, 相似主题字典key为主题value为相似主题列表
每个相似主题是一个元组 (similar_topic, similarity)
- similar_topic: str, 相似的主题
- similarity: float, 相似度分数0-1之间
- similar_topics_dict: dict, 相似主题字典
Process:
1. 合并消息文本并生成时间信息
2. 使用LLM提取关键主题
3. 过滤掉包含禁用关键词的主题
4. 为每个主题生成摘要
5. 查找与现有记忆中的相似主题
1. 使用 build_readable_messages 生成包含时间、人物信息的格式化文本。
2. 使用LLM提取关键主题
3. 过滤掉包含禁用关键词的主题
4. 为每个主题生成摘要
5. 查找与现有记忆中的相似主题
"""
if not messages:
return set(), {}
# 合并消息文本,同时保留时间信息
input_text = ""
time_info = ""
# 计算最早和最晚时间
earliest_time = min(msg["time"] for msg in messages)
latest_time = max(msg["time"] for msg in messages)
# 1. 使用 build_readable_messages 生成格式化文本
# build_readable_messages 只返回一个字符串,不需要解包
input_text = await build_readable_messages(
messages,
merge_messages=True, # 合并连续消息
timestamp_mode="normal", # 使用 'YYYY-MM-DD HH:MM:SS' 格式
replace_bot_name=False, # 保留原始用户名
)
earliest_dt = datetime.datetime.fromtimestamp(earliest_time)
latest_dt = datetime.datetime.fromtimestamp(latest_time)
# 如果生成的可读文本为空(例如所有消息都无效),则直接返回
if not input_text:
logger.warning("无法从提供的消息生成可读文本,跳过记忆压缩。")
return set(), {}
# 如果是同一年
if earliest_dt.year == latest_dt.year:
earliest_str = earliest_dt.strftime("%m-%d %H:%M:%S")
latest_str = latest_dt.strftime("%m-%d %H:%M:%S")
time_info += f"是在{earliest_dt.year}年,{earliest_str}{latest_str} 的对话:\n"
else:
earliest_str = earliest_dt.strftime("%Y-%m-%d %H:%M:%S")
latest_str = latest_dt.strftime("%Y-%m-%d %H:%M:%S")
time_info += f"是从 {earliest_str}{latest_str} 的对话:\n"
for msg in messages:
input_text += f"{msg['detailed_plain_text']}\n"
logger.debug(input_text)
logger.debug(f"用于压缩的格式化文本:\n{input_text}")
# 2. 使用LLM提取关键主题
topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate)
topics_response = await self.hippocampus.llm_topic_judge.generate_response(
self.hippocampus.find_topic_llm(input_text, topic_num)
)
# 使用正则表达式提取<>中的内容
# 提取<>中的内容
topics = re.findall(r"<([^>]+)>", topics_response[0])
# 如果没有找到<>包裹的内容,返回['none']
if not topics:
topics = ["none"]
else:
# 处理提取出的话题
topics = [
topic.strip()
for topic in ",".join(topics).replace("", ",").replace("", ",").replace(" ", ",").split(",")
if topic.strip()
]
# 过滤掉包含禁用关键词的topic
# 3. 过滤掉包含禁用关键词的topic
filtered_topics = [
topic for topic in topics if not any(keyword in topic for keyword in self.config.memory_ban_words)
]
logger.debug(f"过滤后话题: {filtered_topics}")
# 创建所有话题的请求任务
# 4. 创建所有话题的摘要生成任务
tasks = []
for topic in filtered_topics:
topic_what_prompt = self.hippocampus.topic_what(input_text, topic, time_info)
# 调用修改后的 topic_what不再需要 time_info
topic_what_prompt = self.hippocampus.topic_what(input_text, topic)
try:
task = self.hippocampus.llm_summary_by_topic.generate_response_async(topic_what_prompt)
tasks.append((topic.strip(), task))

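
The topic-extraction step above boils down to: pull the <>-wrapped concepts out of the LLM reply, normalize the separators, then drop banned words. A self-contained sketch (ban list invented for the example):

import re

def parse_topics(llm_reply: str, ban_words: list[str]) -> list[str]:
    # 1. Pull every <...> group out of the raw reply.
    raw = re.findall(r"<([^>]+)>", llm_reply)
    if not raw:
        return ["none"]
    # 2. Normalize 、, fullwidth commas and spaces to ASCII commas, then split.
    joined = ",".join(raw).replace("、", ",").replace(",", ",").replace(" ", ",")
    topics = [t.strip() for t in joined.split(",") if t.strip()]
    # 3. Drop any topic containing a banned keyword.
    return [t for t in topics if not any(w in t for w in ban_words)]

print(parse_topics("<考试>,<睡觉>,<广告推广>", ban_words=["广告"]))  # ['考试', '睡觉']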
View File

@@ -750,7 +750,6 @@ class LLMRequest:
"tools": tools,
}
response = await self._execute_request(endpoint="/chat/completions", payload=data, prompt=prompt)
logger.debug(f"向模型 {self.model_name} 发送工具调用请求,包含 {len(tools)} 个工具,返回结果: {response}")
# 检查响应是否包含工具调用

View File

@@ -180,10 +180,10 @@ class PersonInfoManager:
existing_names = ""
while current_try < max_retries:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
prompt_personality = individuality.get_prompt(x_person=2, level=1)
bot_name = individuality.personality.bot_nickname
qv_name_prompt = f"你是{bot_name}{prompt_personality}"
qv_name_prompt = f"你是{bot_name}{prompt_personality}"
qv_name_prompt += f"现在你想给一个用户取一个昵称用户是的qq昵称是{user_nickname}"
qv_name_prompt += f"用户的qq群昵称名是{user_cardname}"
if user_avatar:

View File

@@ -1,6 +1,6 @@
import json
import logging
from typing import Any, Dict, TypeVar, List, Union, Callable, Tuple
from typing import Any, Dict, TypeVar, List, Union, Tuple
# 定义类型变量用于泛型类型提示
T = TypeVar("T")
@@ -70,7 +70,6 @@ def extract_tool_call_arguments(tool_call: Dict[str, Any], default_value: Dict[s
return default_result
def safe_json_dumps(obj: Any, default_value: str = "{}", ensure_ascii: bool = False, pretty: bool = False) -> str:
"""
安全地将Python对象序列化为JSON字符串
@@ -95,8 +94,6 @@ def safe_json_dumps(obj: Any, default_value: str = "{}", ensure_ascii: bool = Fa
return default_value
def normalize_llm_response(response: Any, log_prefix: str = "") -> Tuple[bool, List[Any], str]:
"""
标准化LLM响应格式,将各种格式(如元组)转换为统一的列表格式
@@ -140,7 +137,9 @@ def normalize_llm_response(response: Any, log_prefix: str = "") -> Tuple[bool, L
return True, response, ""
def process_llm_tool_calls(tool_calls: List[Dict[str, Any]], log_prefix: str = "") -> Tuple[bool, List[Dict[str, Any]], str]:
def process_llm_tool_calls(
tool_calls: List[Dict[str, Any]], log_prefix: str = ""
) -> Tuple[bool, List[Dict[str, Any]], str]:
"""
处理并验证LLM响应中的工具调用列表
@@ -165,7 +164,9 @@ def process_llm_tool_calls(tool_calls: List[Dict[str, Any]], log_prefix: str = "
# 检查基本结构
if tool_call.get("type") != "function":
logger.warning(f"{log_prefix}工具调用[{i}]不是function类型: type={tool_call.get('type', '未定义')}, 内容: {tool_call}")
logger.warning(
f"{log_prefix}工具调用[{i}]不是function类型: type={tool_call.get('type', '未定义')}, 内容: {tool_call}"
)
continue
if "function" not in tool_call or not isinstance(tool_call.get("function"), dict):
@@ -176,16 +177,20 @@ def process_llm_tool_calls(tool_calls: List[Dict[str, Any]], log_prefix: str = "
if "name" not in func_details or not isinstance(func_details.get("name"), str):
logger.warning(f"{log_prefix}工具调用[{i}]的'function'字段缺少'name'或类型不正确: {func_details}")
continue
if "arguments" not in func_details or not isinstance(func_details.get("arguments"), str): # 参数是字符串形式的JSON
if "arguments" not in func_details or not isinstance(
func_details.get("arguments"), str
): # 参数是字符串形式的JSON
logger.warning(f"{log_prefix}工具调用[{i}]的'function'字段缺少'arguments'或类型不正确: {func_details}")
continue
# 可选尝试解析参数JSON确保其有效
args_str = func_details["arguments"]
try:
json.loads(args_str) # 尝试解析,但不存储结果
json.loads(args_str) # 尝试解析,但不存储结果
except json.JSONDecodeError as e:
logger.warning(f"{log_prefix}工具调用[{i}]的'arguments'不是有效的JSON字符串: {e}, 内容: {args_str[:100]}...")
logger.warning(
f"{log_prefix}工具调用[{i}]的'arguments'不是有效的JSON字符串: {e}, 内容: {args_str[:100]}..."
)
continue
except Exception as e:
logger.warning(f"{log_prefix}解析工具调用[{i}]的'arguments'时发生意外错误: {e}, 内容: {args_str[:100]}...")
@@ -193,7 +198,7 @@ def process_llm_tool_calls(tool_calls: List[Dict[str, Any]], log_prefix: str = "
valid_tool_calls.append(tool_call)
if not valid_tool_calls and tool_calls: # 如果原始列表不为空,但验证后为空
if not valid_tool_calls and tool_calls: # 如果原始列表不为空,但验证后为空
return False, [], "所有工具调用格式均无效"
return True, valid_tool_calls, ""

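
For a concrete picture of what survives this validator: a minimal hand-built tool_call in the expected shape (function name borrowed from the benchmark responses at the top of this commit):

import json
from src.plugins.utils.json_utils import process_llm_tool_calls

tool_call = {
    "type": "function",
    "function": {
        "name": "decide_reply_action",
        # arguments must be a JSON *string*, not a dict
        "arguments": json.dumps({"action": "text_reply", "emoji_query": "友好的回应"}, ensure_ascii=False),
    },
}
success, valid_calls, error = process_llm_tool_calls([tool_call], log_prefix="demo ")
assert success and len(valid_calls) == 1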
View File

@@ -48,12 +48,10 @@ personality_sides = [
identity_detail = [
"身份特点",
"身份特点",
]# 条数任意,不能为0, 该选项还在调试中,可能未完全生效
]# 条数任意,不能为0, 该选项还在调试中
#外貌特征
height = 170 # 身高 单位厘米 该选项还在调试中,暂时未生效
weight = 50 # 体重 单位千克 该选项还在调试中,暂时未生效
age = 20 # 年龄 单位岁 该选项还在调试中,暂时未生效
gender = "男" # 性别 该选项还在调试中,暂时未生效
age = 20 # 年龄 单位 岁
gender = "男" # 性别
appearance = "用几句话描述外貌特征" # 外貌特征 该选项还在调试中,暂时未生效
[schedule]