better: refactor personinfo to use the Person class and class attributes

SengokuCola
2025-08-12 14:33:13 +08:00
parent 1e7f3a92a6
commit ae254de494
21 changed files with 468 additions and 1202 deletions

View File

@@ -17,7 +17,7 @@ from src.chat.planner_actions.action_manager import ActionManager
from src.chat.chat_loop.hfc_utils import CycleDetail
from src.person_info.relationship_builder_manager import relationship_builder_manager
from src.chat.express.expression_learner import expression_learner_manager
from src.person_info.person_info import get_person_info_manager
from src.person_info.person_info import Person
from src.person_info.group_relationship_manager import get_group_relationship_manager
from src.plugin_system.base.component_types import ChatMode, EventType
from src.plugin_system.core import events_manager
@@ -306,20 +306,14 @@ class HeartFChatting:
with Timer("回复发送", cycle_timers):
reply_text = await self._send_response(response_set, action_message)
# 存储reply action信息
person_info_manager = get_person_info_manager()
# 获取 platform如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
platform = action_message.get("chat_info_platform")
if platform is None:
platform = getattr(self.chat_stream, "platform", "unknown")
person_id = person_info_manager.get_person_id(
platform,
action_message.get("user_id", ""),
)
person_name = await person_info_manager.get_value(person_id, "person_name")
person = Person(platform = platform ,user_id = action_message.get("user_id", ""))
person_name = person.person_name
action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
await database_api.store_action_info(
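
This hunk (and most of the hunks below) replaces get_person_info_manager() / get_person_id lookups plus async get_value calls with a Person object built from platform and user_id. The Person class itself is not included in this view, so the following is only a minimal sketch of the interface these call sites imply; the hash-based id scheme, eager name loading, and method bodies are assumptions, not the real implementation.

```python
# Hypothetical sketch of the Person interface implied by the call sites in this
# commit (person_id, person_name, build_relationship); not the real implementation.
import hashlib
from typing import Optional


class Person:
    def __init__(self, platform: str, user_id: str, nickname: Optional[str] = None):
        self.platform = platform
        self.user_id = user_id
        # Assumed scheme: a stable hash of platform + user_id; the real
        # derivation is not visible in this commit view.
        self.person_id = hashlib.md5(f"{platform}_{user_id}".encode()).hexdigest() if user_id else ""
        # Exposed as a plain attribute so callers no longer need
        # `await person_info_manager.get_value(person_id, "person_name")`.
        self.person_name: Optional[str] = self._load_person_name() or nickname

    def _load_person_name(self) -> Optional[str]:
        return None  # placeholder for a person_info database lookup

    def build_relationship(self, points_num: int = 5) -> str:
        return ""  # placeholder for the relation-info text used by the replyer
```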

View File

@@ -281,7 +281,7 @@ class ExpressionLearner:
logger.info(f"{group_name} 学习到表达风格:\n{learnt_expressions_str}")
if not learnt_expressions:
logger.info(f"没有学习到表达风格")
logger.info("没有学习到表达风格")
return []
# 按chat_id分组

View File

@@ -3,7 +3,7 @@ import time
import random
import hashlib
from typing import List, Dict, Tuple, Optional, Any
from typing import List, Dict, Optional, Any
from json_repair import repair_json
from src.llm_models.utils_model import LLMRequest
@@ -22,16 +22,22 @@ def init_prompt():
你的名字是{bot_name}{target_message}
你知道以下这些表达方式,梗和说话方式
以下是可选的表达情境
{all_situations}
现在,请你根据聊天记录从中挑选合适的表达方式,梗和说话方式,组织一条回复风格指导,指导的目的是在组织回复的时候提供一些语言风格和梗上的参考
请在reply_style_guide中以平文本输出指导不要浮夸并在selected_expressions中说明在指导中你挑选了哪些表达方式梗和说话方式以json格式输出
例子:
请你分析聊天内容的语境、情绪、话题类型,从上述情境中选择最适合当前聊天情境的,最多{max_num}个情境
考虑因素包括
1. 聊天的情绪氛围(轻松、严肃、幽默等)
2. 话题类型(日常、技术、游戏、情感等)
3. 情境与当前语境的匹配度
{target_message_extra_block}
请以JSON格式输出只需要输出选中的情境编号
例如:
{{
"reply_style_guide": "...",
"selected_expressions": [2, 3, 4, 7]
"selected_situations": [2, 3, 5, 7, 19]
}}
请严格按照JSON格式输出不要包含其他内容
"""
Prompt(expression_evaluation_prompt, "expression_evaluation_prompt")
@@ -190,14 +196,14 @@ class ExpressionSelector:
chat_info: str,
max_num: int = 10,
target_message: Optional[str] = None,
) -> Tuple[str, List[Dict[str, Any]]]:
) -> List[Dict[str, Any]]:
# sourcery skip: inline-variable, list-comprehension
"""使用LLM选择适合的表达方式"""
# 检查是否允许在此聊天流中使用表达
if not self.can_use_expression_for_chat(chat_id):
logger.debug(f"聊天流 {chat_id} 不允许使用表达,返回空列表")
return "", []
return []
# 1. 获取20个随机表达方式现在按权重抽取
style_exprs = self.get_random_expressions(chat_id, 10)
@@ -216,7 +222,7 @@ class ExpressionSelector:
if not all_expressions:
logger.warning("没有找到可用的表达方式")
return "", []
return []
all_situations_str = "\n".join(all_situations)
@@ -255,24 +261,23 @@ class ExpressionSelector:
if not content:
logger.warning("LLM返回空结果")
return "", []
return []
# 5. 解析结果
result = repair_json(content)
if isinstance(result, str):
result = json.loads(result)
if not isinstance(result, dict) or "reply_style_guide" not in result or "selected_expressions" not in result:
if not isinstance(result, dict) or "selected_situations" not in result:
logger.error("LLM返回格式错误")
logger.info(f"LLM返回结果: \n{content}")
return "", []
reply_style_guide = result["reply_style_guide"]
selected_expressions = result["selected_expressions"]
return []
selected_indices = result["selected_situations"]
# 根据索引获取完整的表达方式
valid_expressions = []
for idx in selected_expressions:
for idx in selected_indices:
if isinstance(idx, int) and 1 <= idx <= len(all_expressions):
expression = all_expressions[idx - 1] # 索引从1开始
valid_expressions.append(expression)
@@ -282,11 +287,11 @@ class ExpressionSelector:
self.update_expressions_count_batch(valid_expressions, 0.006)
# logger.info(f"LLM从{len(all_expressions)}个情境中选择了{len(valid_expressions)}个")
return reply_style_guide, valid_expressions
return valid_expressions
except Exception as e:
logger.error(f"LLM处理表达方式选择时出错: {e}")
return "", []
return []
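
After this change, select_suitable_expressions_llm returns only the list of selected expressions (no reply_style_guide), and the LLM is asked for "selected_situations" indices instead of a style guide. A caller-side sketch under those assumptions; the dict keys printed here are illustrative and not confirmed by this diff:

```python
# Hypothetical caller after this change: the (guide, expressions) tuple
# unpacking goes away and only the expression dicts come back.
from src.chat.express.expression_selector import expression_selector


async def pick_expressions(chat_id: str, chat_history: str, target: str):
    selected_expressions = await expression_selector.select_suitable_expressions_llm(
        chat_id, chat_history, max_num=8, target_message=target
    )
    for expr in selected_expressions:
        # "situation" / "style" are assumed keys, used only for illustration.
        print(expr.get("situation"), expr.get("style"))
    return selected_expressions
```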

View File

@@ -22,22 +22,16 @@ def init_prompt():
你的名字是{bot_name}{target_message}
以下是可选的表达情境
你知道以下这些表达方式,梗和说话方式
{all_situations}
请你分析聊天内容的语境、情绪、话题类型,从上述情境中选择最适合当前聊天情境的,最多{max_num}个情境
考虑因素包括
1. 聊天的情绪氛围(轻松、严肃、幽默等)
2. 话题类型(日常、技术、游戏、情感等)
3. 情境与当前语境的匹配度
{target_message_extra_block}
请以JSON格式输出只需要输出选中的情境编号
例如:
现在,请你根据聊天记录从中挑选合适的表达方式,梗和说话方式,组织一条回复风格指导,指导的目的是在组织回复的时候提供一些语言风格和梗上的参考
请在reply_style_guide中以平文本输出指导不要浮夸并在selected_expressions中说明在指导中你挑选了哪些表达方式梗和说话方式以json格式输出
例子:
{{
"selected_situations": [2, 3, 5, 7, 19]
"reply_style_guide": "...",
"selected_expressions": [2, 3, 4, 7]
}}
请严格按照JSON格式输出不要包含其他内容
"""
Prompt(expression_evaluation_prompt, "expression_evaluation_prompt")
@@ -196,14 +190,14 @@ class ExpressionSelector:
chat_info: str,
max_num: int = 10,
target_message: Optional[str] = None,
) -> List[Dict[str, Any]]:
) -> Tuple[str, List[Dict[str, Any]]]:
# sourcery skip: inline-variable, list-comprehension
"""使用LLM选择适合的表达方式"""
# 检查是否允许在此聊天流中使用表达
if not self.can_use_expression_for_chat(chat_id):
logger.debug(f"聊天流 {chat_id} 不允许使用表达,返回空列表")
return []
return "", []
# 1. 获取20个随机表达方式现在按权重抽取
style_exprs = self.get_random_expressions(chat_id, 10)
@@ -222,7 +216,7 @@ class ExpressionSelector:
if not all_expressions:
logger.warning("没有找到可用的表达方式")
return []
return "", []
all_situations_str = "\n".join(all_situations)
@@ -261,23 +255,24 @@ class ExpressionSelector:
if not content:
logger.warning("LLM返回空结果")
return []
return "", []
# 5. 解析结果
result = repair_json(content)
if isinstance(result, str):
result = json.loads(result)
if not isinstance(result, dict) or "selected_situations" not in result:
if not isinstance(result, dict) or "reply_style_guide" not in result or "selected_expressions" not in result:
logger.error("LLM返回格式错误")
logger.info(f"LLM返回结果: \n{content}")
return []
selected_indices = result["selected_situations"]
return "", []
reply_style_guide = result["reply_style_guide"]
selected_expressions = result["selected_expressions"]
# 根据索引获取完整的表达方式
valid_expressions = []
for idx in selected_indices:
for idx in selected_expressions:
if isinstance(idx, int) and 1 <= idx <= len(all_expressions):
expression = all_expressions[idx - 1] # 索引从1开始
valid_expressions.append(expression)
@@ -287,11 +282,11 @@ class ExpressionSelector:
self.update_expressions_count_batch(valid_expressions, 0.006)
# logger.info(f"LLM从{len(all_expressions)}个情境中选择了{len(valid_expressions)}个")
return valid_expressions
return reply_style_guide, valid_expressions
except Exception as e:
logger.error(f"LLM处理表达方式选择时出错: {e}")
return []
return "", []

View File

@@ -14,34 +14,14 @@ from src.chat.utils.utils import is_mentioned_bot_in_message
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.chat_message_builder import replace_user_references_sync
from src.common.logger import get_logger
from src.person_info.relationship_manager import get_relationship_manager
from src.mood.mood_manager import mood_manager
from src.person_info.person_info import Person
if TYPE_CHECKING:
from src.chat.heart_flow.sub_heartflow import SubHeartflow
logger = get_logger("chat")
async def _process_relationship(message: MessageRecv) -> None:
"""处理用户关系逻辑
Args:
message: 消息对象,包含用户信息
"""
platform = message.message_info.platform
user_id = message.message_info.user_info.user_id # type: ignore
nickname = message.message_info.user_info.user_nickname # type: ignore
cardname = message.message_info.user_info.user_cardname or nickname # type: ignore
relationship_manager = get_relationship_manager()
is_known = await relationship_manager.is_known_some_one(platform, user_id)
if not is_known:
logger.info(f"首次认识用户: {nickname}")
await relationship_manager.first_knowing_some_one(platform, user_id, nickname, cardname) # type: ignore
async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool, list[str]]:
"""计算消息的兴趣度
@@ -165,7 +145,7 @@ class HeartFCMessageReceiver:
# 4. 关系处理
if global_config.relationship.enable_relationship:
await _process_relationship(message)
person = Person(platform=message.message_info.platform, user_id=message.message_info.user_info.user_id,nickname=userinfo.user_nickname)
except Exception as e:
logger.error(f"消息处理失败: {e}")

View File

@@ -9,7 +9,6 @@ from datetime import datetime
from src.mais4u.mai_think import mai_thinking_manager
from src.common.logger import get_logger
from src.config.config import global_config, model_config
from src.config.api_ada_configs import TaskConfig
from src.individuality.individuality import get_individuality
from src.llm_models.utils_model import LLMRequest
from src.chat.message_receive.message import UserInfo, Seg, MessageRecv, MessageSending
@@ -27,8 +26,7 @@ from src.chat.express.expression_selector import expression_selector
from src.chat.memory_system.memory_activator import MemoryActivator
from src.chat.memory_system.instant_memory import InstantMemory
from src.mood.mood_manager import mood_manager
from src.person_info.relationship_fetcher import relationship_fetcher_manager
from src.person_info.person_info import get_person_info_manager
from src.person_info.person_info import Person
from src.plugin_system.base.component_types import ActionInfo, EventType
from src.plugin_system.apis import llm_api
@@ -302,16 +300,14 @@ class DefaultReplyer:
if not global_config.relationship.enable_relationship:
return ""
relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
# 获取用户ID
person_info_manager = get_person_info_manager()
person_id = person_info_manager.get_person_id_by_person_name(sender)
person = Person(platform=self.chat_stream.platform, user_id=sender)
person_id = person.person_id
if not person_id:
logger.warning(f"未找到用户 {sender} 的ID跳过信息提取")
return f"你完全不认识{sender}不理解ta的相关信息。"
return await relationship_fetcher.build_relation_info(person_id, points_num=5)
return person.build_relationship(points_num=5)
async def build_expression_habits(self, chat_history: str, target: str) -> Tuple[str, str]:
"""构建表达习惯块
@@ -330,7 +326,7 @@ class DefaultReplyer:
style_habits = []
# 使用从处理器传来的选中表达方式
# LLM模式调用LLM选择5-10个然后随机选5个
reply_style_guide, selected_expressions = await expression_selector.select_suitable_expressions_llm(
selected_expressions = await expression_selector.select_suitable_expressions_llm(
self.chat_stream.stream_id, chat_history, max_num=8, target_message=target
)
@@ -354,7 +350,7 @@ class DefaultReplyer:
)
expression_habits_block += f"{style_habits_str}\n"
return (f"{expression_habits_title}\n{expression_habits_block}", reply_style_guide)
return f"{expression_habits_title}\n{expression_habits_block}"
async def build_memory_block(self, chat_history: str, target: str) -> str:
"""构建记忆块
@@ -659,18 +655,16 @@ class DefaultReplyer:
available_actions = {}
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
person_info_manager = get_person_info_manager()
is_group_chat = bool(chat_stream.group_info)
platform = chat_stream.platform
user_id = reply_message.get("user_id","")
if user_id:
person_id = person_info_manager.get_person_id(platform,user_id)
person_name = await person_info_manager.get_value(person_id, "person_name")
person = Person(platform=platform, user_id=user_id)
person_name = person.person_name or user_id
sender = person_name
target = reply_message.get('processed_plain_text')
else:
person_id = ""
person_name = "用户"
sender = "用户"
target = "消息"
@@ -746,7 +740,7 @@ class DefaultReplyer:
logger.warning(f"回复生成前信息获取耗时过长: {chinese_name} 耗时: {duration:.1f}s请使用更快的模型")
logger.info(f"在回复前的步骤耗时: {'; '.join(timing_logs)}")
(expression_habits_block, reply_style_guide) = results_dict["expression_habits"]
expression_habits_block = results_dict["expression_habits"]
relation_info = results_dict["relation_info"]
memory_block = results_dict["memory_block"]
tool_info = results_dict["tool_info"]
@@ -802,7 +796,7 @@ class DefaultReplyer:
if global_config.bot.qq_account == user_id and platform == global_config.bot.platform:
return await global_prompt_manager.format_prompt(
"replyer_self_prompt",
expression_habits_block=reply_style_guide,
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
memory_block=memory_block,
@@ -822,7 +816,7 @@ class DefaultReplyer:
else:
return await global_prompt_manager.format_prompt(
"replyer_prompt",
expression_habits_block=reply_style_guide,
expression_habits_block=expression_habits_block,
tool_info_block=tool_info,
knowledge_prompt=prompt_info,
memory_block=memory_block,
@@ -885,7 +879,6 @@ class DefaultReplyer:
self.build_relation_info(sender, target),
)
expression_habits_block, reply_style_guide = expression_habits_block
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
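
In the hunks above, relationship_fetcher.build_relation_info(person_id, points_num=5) is replaced by person.build_relationship(points_num=5). That method is not defined anywhere in this view; below is a rough sketch of the behaviour it presumably wraps, written as a standalone function with assumed inputs rather than the real implementation:

```python
from typing import List


def build_relationship(person_name: str, relation_points: List[str], points_num: int = 5) -> str:
    """Assumed shape of Person.build_relationship: format up to `points_num`
    relation points into the prompt block the replyer inserts (illustrative only)."""
    points = relation_points[:points_num]
    if not points:
        return f"你完全不认识{person_name},不理解ta的相关信息。"
    return f"你对{person_name}的了解:\n" + "\n".join(points)
```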

View File

@@ -1,7 +1,6 @@
from typing import Dict, Optional, List, Tuple
from typing import Dict, Optional
from src.common.logger import get_logger
from src.config.api_ada_configs import TaskConfig
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
from src.chat.replyer.default_generator import DefaultReplyer

View File

@@ -9,7 +9,7 @@ from src.config.config import global_config
from src.common.message_repository import find_messages, count_messages
from src.common.database.database_model import ActionRecords
from src.common.database.database_model import Images
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
from src.person_info.person_info import Person,get_person_id
from src.chat.utils.utils import translate_timestamp_to_human_readable, assign_message_ids
install(extra_lines=3)
@@ -35,14 +35,12 @@ def replace_user_references_sync(
str: 处理后的内容字符串
"""
if name_resolver is None:
person_info_manager = get_person_info_manager()
def default_resolver(platform: str, user_id: str) -> str:
# 检查是否是机器人自己
if replace_bot_name and user_id == global_config.bot.qq_account:
return f"{global_config.bot.nickname}(你)"
person_id = PersonInfoManager.get_person_id(platform, user_id)
return person_info_manager.get_value_sync(person_id, "person_name") or user_id # type: ignore
person = Person(platform=platform, user_id=user_id)
return person.person_name or user_id # type: ignore
name_resolver = default_resolver
@@ -110,14 +108,12 @@ async def replace_user_references_async(
str: 处理后的内容字符串
"""
if name_resolver is None:
person_info_manager = get_person_info_manager()
async def default_resolver(platform: str, user_id: str) -> str:
# 检查是否是机器人自己
if replace_bot_name and user_id == global_config.bot.qq_account:
return f"{global_config.bot.nickname}(你)"
person_id = PersonInfoManager.get_person_id(platform, user_id)
return await person_info_manager.get_value(person_id, "person_name") or user_id # type: ignore
person = Person(platform=platform, user_id=user_id)
return person.person_name or user_id # type: ignore
name_resolver = default_resolver
@@ -506,14 +502,13 @@ def _build_readable_messages_internal(
if not all([platform, user_id, timestamp is not None]):
continue
person_id = PersonInfoManager.get_person_id(platform, user_id)
person_info_manager = get_person_info_manager()
person = Person(platform=platform, user_id=user_id)
# 根据 replace_bot_name 参数决定是否替换机器人名称
person_name: str
if replace_bot_name and user_id == global_config.bot.qq_account:
person_name = f"{global_config.bot.nickname}(你)"
else:
person_name = person_info_manager.get_value_sync(person_id, "person_name") # type: ignore
person_name = person.person_name or user_id # type: ignore
# 如果 person_name 未设置,则使用消息中的 nickname 或默认名称
if not person_name:
@@ -1009,7 +1004,7 @@ async def build_anonymous_messages(messages: List[Dict[str, Any]]) -> str:
# print("SELF11111111111111")
return "SELF"
try:
person_id = PersonInfoManager.get_person_id(platform, user_id)
person_id = get_person_id(platform, user_id)
except Exception as _e:
person_id = None
if not person_id:
@@ -1102,7 +1097,7 @@ async def get_person_id_list(messages: List[Dict[str, Any]]) -> List[str]:
if platform is None:
platform = "unknown"
if person_id := PersonInfoManager.get_person_id(platform, user_id):
if person_id := get_person_id(platform, user_id):
person_ids_set.add(person_id)
return list(person_ids_set) # 将集合转换为列表返回
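
chat_message_builder now imports a module-level get_person_id instead of the PersonInfoManager.get_person_id static method. Its definition is not part of this view; here is a minimal sketch, assuming the usual stable-hash derivation from platform and user_id:

```python
# Hypothetical stand-in for src.person_info.person_info.get_person_id; the
# MD5-of-"platform_user_id" scheme is an assumption, not taken from this diff.
import hashlib


def get_person_id(platform: str, user_id: str) -> str:
    if not platform or not user_id:
        return ""
    return hashlib.md5(f"{platform}_{user_id}".encode()).hexdigest()
```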

View File

@@ -15,7 +15,7 @@ from src.config.config import global_config, model_config
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.chat_stream import get_chat_manager
from src.llm_models.utils_model import LLMRequest
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
from src.person_info.person_info import Person
from .typo_generator import ChineseTypoGenerator
logger = get_logger("chat_utils")
@@ -639,12 +639,12 @@ def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]:
# Try to fetch person info
try:
# Assume get_person_id is sync (as per original code), keep using to_thread
person_id = PersonInfoManager.get_person_id(platform, user_id)
person = Person(platform=platform, user_id=user_id)
person_id = person.person_id
person_name = None
if person_id:
# get_value is async, so await it directly
person_info_manager = get_person_info_manager()
person_name = person_info_manager.get_value_sync(person_id, "person_name")
person_name = person.person_name
target_info["person_id"] = person_id
target_info["person_name"] = person_name