fix:修复合并问题

This commit is contained in:
SengokuCola
2025-05-16 23:16:47 +08:00
parent b4f6db0767
commit 61e0dbe372
13 changed files with 25 additions and 23 deletions

View File

@@ -128,7 +128,7 @@ class APIBotConfig:
llm_reasoning: Dict[str, Any] # 推理模型配置
llm_normal: Dict[str, Any] # 普通模型配置
llm_topic_judge: Dict[str, Any] # 主题判断模型配置
llm_summary: Dict[str, Any] # 总结模型配置
llm_summary: Dict[str, Any]  # 总结模型配置
vlm: Dict[str, Any] # VLM模型配置
llm_heartflow: Dict[str, Any] # 心流模型配置
llm_observation: Dict[str, Any] # 观察模型配置
@@ -203,7 +203,7 @@ class APIBotConfig:
"llm_reasoning",
"llm_normal",
"llm_topic_judge",
"llm_summary",
"llm_summary",
"vlm",
"llm_heartflow",
"llm_observation",

View File

@@ -351,7 +351,7 @@ class DefaultExpressor:
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME,
bot_name=global_config.bot.nickname,
prompt_personality="",
reason=reason,
in_mind_reply=in_mind_reply,
@@ -363,7 +363,7 @@ class DefaultExpressor:
template_name,
sender_name=effective_sender_name, # Used in private template
chat_talking_prompt=chat_talking_prompt,
bot_name=global_config.BOT_NICKNAME,
bot_name=global_config.bot.nickname,
prompt_personality=prompt_personality,
reason=reason,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),

View File

@@ -7,7 +7,6 @@ from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.utils.utils import get_embedding
import time
from typing import Union, Optional
from src.common.database import db
from src.chat.utils.utils import get_recent_group_speaker
from src.manager.mood_manager import mood_manager
from src.chat.memory_system.Hippocampus import HippocampusManager

View File

@@ -27,7 +27,7 @@ class ChattingInfoProcessor(BaseProcessor):
"""初始化观察处理器"""
super().__init__()
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest(
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
@@ -94,7 +94,7 @@ class ChattingInfoProcessor(BaseProcessor):
async def chat_compress(self, obs: ChattingObservation):
if obs.compressor_prompt:
try:
summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt)
summary_result, _, _ = await self.model_summary.generate_response(obs.compressor_prompt)
summary = "没有主题的闲聊" # 默认值
if summary_result: # 确保结果不为空
summary = summary_result

View File

@@ -49,8 +49,8 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
model=global_config.model.sub_heartflow,
temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="self_identify",
)

View File

@@ -61,8 +61,8 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
model=global_config.llm_sub_heartflow,
temperature=global_config.llm_sub_heartflow["temp"],
model=global_config.model.sub_heartflow,
temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="working_memory",
)
@@ -118,7 +118,7 @@ class WorkingMemoryProcessor(BaseProcessor):
# 使用提示模板进行处理
prompt = (await global_prompt_manager.get_prompt_async("prompt_memory_proces")).format(
bot_name=global_config.BOT_NICKNAME,
bot_name=global_config.bot.nickname,
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_info,
memory_str=memory_choose_str,

View File

@@ -69,7 +69,7 @@ class ActionPlanner:
self.log_prefix = log_prefix
# LLM规划器配置
self.planner_llm = LLMRequest(
model=global_config.llm_plan,
model=global_config.model.plan,
max_tokens=1000,
request_type="action_planning", # 用于动作规划
)
@@ -273,7 +273,7 @@ class ActionPlanner:
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(
bot_name=global_config.BOT_NICKNAME,
bot_name=global_config.bot.nickname,
prompt_personality=personality_block,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,

View File

@@ -33,7 +33,7 @@ class MemoryManager:
self._id_map: Dict[str, MemoryItem] = {}
self.llm_summarizer = LLMRequest(
model=global_config.llm_summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
model=global_config.model.summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
)
@property

View File

@@ -67,7 +67,7 @@ class ChattingObservation(Observation):
self.oldest_messages_str = ""
self.compressor_prompt = ""
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest(
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)

View File

@@ -193,7 +193,7 @@ class Hippocampus:
def __init__(self):
self.memory_graph = MemoryGraph()
self.llm_topic_judge = None
self.llm_summary = None
self.model_summary = None
self.entorhinal_cortex = None
self.parahippocampal_gyrus = None
@@ -205,7 +205,7 @@ class Hippocampus:
self.entorhinal_cortex.sync_memory_from_db()
# TODO: API-Adapter修改标记
self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory")
self.llm_summary = LLMRequest(global_config.model.summary, request_type="memory")
self.model_summary = LLMRequest(global_config.model.summary, request_type="memory")
def get_all_node_names(self) -> list:
"""获取记忆图中所有节点的名字列表"""
@@ -1167,7 +1167,7 @@ class ParahippocampalGyrus:
# 调用修改后的 topic_what不再需要 time_info
topic_what_prompt = self.hippocampus.topic_what(input_text, topic)
try:
task = self.hippocampus.llm_summary.generate_response_async(topic_what_prompt)
task = self.hippocampus.model_summary.generate_response_async(topic_what_prompt)
tasks.append((topic.strip(), task))
except Exception as e:
logger.error(f"生成话题 '{topic}' 的摘要时发生错误: {e}")

View File

@@ -72,6 +72,7 @@ class ChatBot:
message_data["message_info"]["user_info"]["user_id"] = str(
message_data["message_info"]["user_info"]["user_id"]
)
# print(message_data)
logger.trace(f"处理消息:{str(message_data)[:120]}...")
message = MessageRecv(message_data)
groupinfo = message.message_info.group_info
@@ -86,12 +87,14 @@ class ChatBot:
logger.trace("检测到私聊消息,检查")
# 好友黑名单拦截
if userinfo.user_id not in global_config.experimental.talk_allowed_private:
logger.debug(f"用户{userinfo.user_id}没有私聊权限")
# logger.debug(f"用户{userinfo.user_id}没有私聊权限")
return
# 群聊黑名单拦截
# print(groupinfo.group_id)
# print(global_config.chat_target.talk_allowed_groups)
if groupinfo is not None and groupinfo.group_id not in global_config.chat_target.talk_allowed_groups:
logger.trace(f"{groupinfo.group_id}被禁止回复")
logger.debug(f"{groupinfo.group_id}被禁止回复")
return
# 确认从接口发来的message是否有自定义的prompt模板信息

View File

@@ -77,7 +77,7 @@ class RelationshipManager:
@staticmethod
async def is_known_some_one(platform, user_id):
"""判断是否认识某人"""
is_known = person_info_manager.is_person_known(platform, user_id)
is_known = await person_info_manager.is_person_known(platform, user_id)
return is_known
@staticmethod

View File

@@ -316,7 +316,7 @@ class GoalAnalyzer:
# message_segment = Seg(type="text", data=content)
# bot_user_info = UserInfo(
# user_id=global_config.BOT_QQ,
# user_nickname=global_config.BOT_NICKNAME,
# user_nickname=global_config.bot.nickname,
# platform=chat_stream.platform,
# )