SengokuCola
2025-06-14 22:11:16 +08:00
3 changed files with 38 additions and 39 deletions

View File

@@ -678,16 +678,20 @@ class NormalChat:
"last_time": current_time,
"receive_count": 0,
"reply_count": 0,
"relation_built": False
"relation_built": False,
}
if is_reply:
self.engaging_persons[person_id]["reply_count"] += 1
logger.debug(f"[{self.stream_name}] 用户 {person_id} 回复次数更新: {self.engaging_persons[person_id]['reply_count']}")
logger.debug(
f"[{self.stream_name}] 用户 {person_id} 回复次数更新: {self.engaging_persons[person_id]['reply_count']}"
)
else:
self.engaging_persons[person_id]["receive_count"] += 1
self.engaging_persons[person_id]["last_time"] = current_time
logger.debug(f"[{self.stream_name}] 用户 {person_id} 消息次数更新: {self.engaging_persons[person_id]['receive_count']}")
logger.debug(
f"[{self.stream_name}] 用户 {person_id} 消息次数更新: {self.engaging_persons[person_id]['receive_count']}"
)
async def _check_relation_building_conditions(self):
@@ -789,9 +793,7 @@ class NormalChat:
# Call the relationship manager to update the impression
relationship_manager = get_relationship_manager()
await relationship_manager.update_person_impression(
person_id=person_id,
timestamp=end_time,
bot_engaged_messages=messages
person_id=person_id, timestamp=end_time, bot_engaged_messages=messages
)
logger.info(f"[{self.stream_name}] 用户 {person_id} 关系构建完成")

View File

@@ -382,9 +382,14 @@ class ImageManager:
if existing_image:
# Check whether required fields are missing; if any are, create a new record
if (not hasattr(existing_image, 'image_id') or not existing_image.image_id or
not hasattr(existing_image, 'count') or existing_image.count is None or
not hasattr(existing_image, 'vlm_processed') or existing_image.vlm_processed is None):
if (
not hasattr(existing_image, "image_id")
or not existing_image.image_id
or not hasattr(existing_image, "count")
or existing_image.count is None
or not hasattr(existing_image, "vlm_processed")
or existing_image.vlm_processed is None
):
logger.debug(f"图片记录缺少必要字段,补全旧记录: {image_hash}")
image_id = str(uuid.uuid4())
else:
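
The rewritten condition above only reformats an existing check: the record needs backfilling when image_id is missing or falsy, or when count or vlm_processed is None. One self-contained way to express the same test; the _Row stand-in class is illustrative, not the project's actual model:

def needs_backfill(record: object) -> bool:
    # image_id must exist and be truthy; count and vlm_processed only need to be non-None.
    if not getattr(record, "image_id", None):
        return True
    return any(getattr(record, field, None) is None for field in ("count", "vlm_processed"))

class _Row:  # hypothetical stand-in for the ORM record checked in the hunk above
    image_id = "abc"
    count = None
    vlm_processed = True

print(needs_backfill(_Row()))  # True, because count is None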

View File

@@ -11,7 +11,6 @@ import json
from json_repair import repair_json
from datetime import datetime
from difflib import SequenceMatcher
import ast
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
@@ -431,9 +430,6 @@ class RelationshipManager:
await person_info_manager.update_one_field(person_id, "impression", compressed_summary)
compress_short_prompt = f"""
Your name is {global_config.bot.nickname}; {global_config.bot.nickname}'s aliases are {alias_str}.
Please do not confuse yourself, {global_config.bot.nickname}, with {person_name}.
@@ -447,19 +443,15 @@ class RelationshipManager:
3. Key information about {person_name}
Output a single plain-text paragraph, in a first-person, matter-of-fact tone, summarizing what you know about {person_name}; do not output anything else.
"""
compressed_short_summary, _ = await self.relationship_llm.generate_response_async(prompt=compress_short_prompt)
compressed_short_summary, _ = await self.relationship_llm.generate_response_async(
prompt=compress_short_prompt
)
# current_time = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
# compressed_short_summary = f"As of {current_time}, what you know about {person_name}: {compressed_short_summary}"
await person_info_manager.update_one_field(person_id, "short_impression", compressed_short_summary)
forgotten_points = []
# This line serializes the updated forgotten_points (the list of forgotten memory points) to a JSON string and writes it back to the forgotten_points field in the database
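
A minimal sketch of the round trip that comment describes, assuming update_one_field takes (person_id, field_name, value) as seen elsewhere in this diff; the helper name persist_forgotten_points is illustrative:

import json

async def persist_forgotten_points(person_info_manager, person_id: str, forgotten_points: list) -> None:
    # Store the (possibly emptied) list as a JSON string so the database
    # column always holds valid JSON.
    payload = json.dumps(forgotten_points, ensure_ascii=False)
    await person_info_manager.update_one_field(person_id, "forgotten_points", payload)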