fix: resolve merge issues

SengokuCola
2025-05-16 23:16:47 +08:00
parent b4f6db0767
commit 61e0dbe372
13 changed files with 25 additions and 23 deletions

View File

@@ -351,7 +351,7 @@ class DefaultExpressor:
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
-bot_name=global_config.BOT_NICKNAME,
+bot_name=global_config.bot.nickname,
prompt_personality="",
reason=reason,
in_mind_reply=in_mind_reply,
@@ -363,7 +363,7 @@ class DefaultExpressor:
template_name,
sender_name=effective_sender_name, # Used in private template
chat_talking_prompt=chat_talking_prompt,
-bot_name=global_config.BOT_NICKNAME,
+bot_name=global_config.bot.nickname,
prompt_personality=prompt_personality,
reason=reason,
moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
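
The recurring change in this commit is a config migration: the flat global_config.BOT_NICKNAME and global_config.llm_* accessors become nested global_config.bot.* and global_config.model.* paths. A minimal sketch of the nested layout these diffs imply, assuming dataclass-style sections; the class names here are hypothetical, only the field paths come from the diffs:

# Minimal sketch of the assumed nested config layout; the class names
# are hypothetical, only the field paths come from the diffs below.
from dataclasses import dataclass, field

@dataclass
class BotConfig:
    nickname: str = "bot"  # replaces the old flat BOT_NICKNAME

@dataclass
class ModelConfig:
    # per-task model settings; assumed dict-like, since later diffs
    # subscript them with ["temp"]
    observation: dict = field(default_factory=dict)
    sub_heartflow: dict = field(default_factory=dict)
    plan: dict = field(default_factory=dict)
    summary: dict = field(default_factory=dict)

@dataclass
class GlobalConfig:
    bot: BotConfig = field(default_factory=BotConfig)
    model: ModelConfig = field(default_factory=ModelConfig)

global_config = GlobalConfig()
print(global_config.bot.nickname)  # the new access path used throughout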

View File

@@ -7,7 +7,6 @@ from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.utils.utils import get_embedding
import time
from typing import Union, Optional
-from src.common.database import db
from src.chat.utils.utils import get_recent_group_speaker
from src.manager.mood_manager import mood_manager
from src.chat.memory_system.Hippocampus import HippocampusManager

View File

@@ -27,7 +27,7 @@ class ChattingInfoProcessor(BaseProcessor):
"""初始化观察处理器"""
super().__init__()
# TODO: API-Adapter修改标记
self.llm_summary = LLMRequest(
self.model_summary = LLMRequest(
model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
)
@@ -94,7 +94,7 @@ class ChattingInfoProcessor(BaseProcessor):
async def chat_compress(self, obs: ChattingObservation):
if obs.compressor_prompt:
try:
-summary_result, _, _ = await self.llm_summary.generate_response(obs.compressor_prompt)
+summary_result, _, _ = await self.model_summary.generate_response(obs.compressor_prompt)
summary = "没有主题的闲聊"  # default value
if summary_result:  # make sure the result is not empty
summary = summary_result
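
Besides the config migration, this file renames the summarizer attribute from self.llm_summary to self.model_summary; the call site also shows the assumed contract of LLMRequest.generate_response: an awaitable returning a 3-tuple whose first element is the generated text. A hedged stand-in illustrating that call shape (the stub is illustrative, not the repository's class):

# Hedged stand-in with the same call shape as LLMRequest; assumes
# generate_response returns a 3-tuple whose first element is the text.
import asyncio

class FakeLLMRequest:
    async def generate_response(self, prompt):
        return f"summary of: {prompt[:20]}", None, None

async def demo():
    model_summary = FakeLLMRequest()
    summary_result, _, _ = await model_summary.generate_response("...")
    summary = summary_result or "没有主题的闲聊"  # same fallback as chat_compress
    print(summary)

asyncio.run(demo())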

View File

@@ -49,8 +49,8 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
-model=global_config.llm_sub_heartflow,
-temperature=global_config.llm_sub_heartflow["temp"],
+model=global_config.model.sub_heartflow,
+temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="self_identify",
)
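
Note the mixed access pattern after the migration: the whole global_config.model.sub_heartflow object is passed as model=, and the same object is subscripted with ["temp"] for the temperature, so each per-model entry must be dict-like. A minimal sketch of a config entry satisfying both uses; every key except "temp" is an assumption for illustration:

# Minimal sketch of a per-model config entry that satisfies both uses in
# this hunk: passed whole as model=..., and subscripted with ["temp"].
sub_heartflow = {
    "name": "some-chat-model",  # assumed key, for illustration only
    "temp": 0.7,
}

def llm_request(model, temperature, max_tokens, request_type):
    # stand-in for LLMRequest(...) showing only the call shape
    return {"model": model, "temperature": temperature,
            "max_tokens": max_tokens, "request_type": request_type}

req = llm_request(
    model=sub_heartflow,
    temperature=sub_heartflow["temp"],  # same object, dict-style access
    max_tokens=800,
    request_type="self_identify",
)
print(req["temperature"])  # 0.7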

View File

@@ -61,8 +61,8 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest(
-model=global_config.llm_sub_heartflow,
-temperature=global_config.llm_sub_heartflow["temp"],
+model=global_config.model.sub_heartflow,
+temperature=global_config.model.sub_heartflow["temp"],
max_tokens=800,
request_type="working_memory",
)
@@ -118,7 +118,7 @@ class WorkingMemoryProcessor(BaseProcessor):
# process with the prompt template
prompt = (await global_prompt_manager.get_prompt_async("prompt_memory_proces")).format(
-bot_name=global_config.BOT_NICKNAME,
+bot_name=global_config.bot.nickname,
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
chat_observe_info=chat_info,
memory_str=memory_choose_str,

View File

@@ -69,7 +69,7 @@ class ActionPlanner:
self.log_prefix = log_prefix
# LLM planner configuration
self.planner_llm = LLMRequest(
-model=global_config.llm_plan,
+model=global_config.model.plan,
max_tokens=1000,
request_type="action_planning",  # used for action planning
)
@@ -273,7 +273,7 @@ class ActionPlanner:
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
prompt = planner_prompt_template.format(
-bot_name=global_config.BOT_NICKNAME,
+bot_name=global_config.bot.nickname,
prompt_personality=personality_block,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
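
The same fetch-then-format prompt pattern recurs across these files: look up a named template via global_prompt_manager.get_prompt_async(...), then fill it with str.format, now passing global_config.bot.nickname. A hedged sketch of that flow; the template text and the stub manager are illustrative, not the repository's own code:

# Hedged sketch of the fetch-then-format prompt flow; template text and
# stub manager are illustrative, not the repository's own code.
import asyncio

class StubPromptManager:
    _prompts = {"planner_prompt": "You are {bot_name}. Context: {chat_context_description}"}

    async def get_prompt_async(self, name):
        return self._prompts[name]

async def demo():
    template = await StubPromptManager().get_prompt_async("planner_prompt")
    prompt = template.format(
        bot_name="my-bot",  # global_config.bot.nickname in the real call
        chat_context_description="group chat, three active speakers",
    )
    print(prompt)

asyncio.run(demo())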

View File

@@ -33,7 +33,7 @@ class MemoryManager:
self._id_map: Dict[str, MemoryItem] = {}
self.llm_summarizer = LLMRequest(
-model=global_config.llm_summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
+model=global_config.model.summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
)
@property