feat:添加激活和改写的最大上下文限制,修剪planner长度

This commit is contained in:
SengokuCola
2025-07-13 20:45:21 +08:00
parent 7d193fe37b
commit 11bef44901
7 changed files with 36 additions and 32 deletions

View File

@@ -585,7 +585,7 @@ class HeartFChatting:
                 reply_to=reply_to,
                 available_actions=available_actions,
                 enable_tool=global_config.tool.enable_in_normal_chat,
-                request_type="normal.replyer",
+                request_type="chat.replyer.normal",
             )
             if not success or not reply_set:

View File

@@ -199,7 +199,7 @@ class Hippocampus:
         # 从数据库加载记忆图
         self.entorhinal_cortex.sync_memory_from_db()
         # TODO: API-Adapter修改标记
-        self.model_summary = LLMRequest(global_config.model.memory, request_type="memory")
+        self.model_summary = LLMRequest(global_config.model.memory, request_type="memory.builder")

     def get_all_node_names(self) -> list:
         """获取记忆图中所有节点的名字列表"""

View File

@@ -66,7 +66,7 @@ class MemoryActivator:
         self.key_words_model = LLMRequest(
             model=global_config.model.utils_small,
             temperature=0.5,
-            request_type="memory_activator",
+            request_type="memory.activator",
         )
         self.running_memory = []

View File

@@ -71,7 +71,7 @@ class ActionModifier:
         message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
             chat_id=self.chat_stream.stream_id,
             timestamp=time.time(),
-            limit=int(global_config.chat.max_context_size * 0.5),
+            limit=min(int(global_config.chat.max_context_size * 0.33), 10),
         )
         chat_content = build_readable_messages(
             message_list_before_now_half,

View File

@@ -224,7 +224,7 @@ class ActionPlanner:
         message_list_before_now = get_raw_msg_before_timestamp_with_chat(
             chat_id=self.chat_id,
             timestamp=time.time(),
-            limit=global_config.chat.max_context_size,
+            limit=int(global_config.chat.max_context_size * 0.6),
         )
         chat_content_block = build_readable_messages(

View File

@@ -280,7 +280,7 @@ class DefaultReplyer:
         # 加权随机选择一个模型配置
         selected_model_config = self._select_weighted_model_config()
         logger.info(
-            f"{self.log_prefix} 使用模型配置进行重写: {selected_model_config.get('model_name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
+            f"{self.log_prefix} 使用模型配置进行重写: {selected_model_config.get('name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
         )
         express_model = LLMRequest(
@@ -797,7 +797,7 @@ class DefaultReplyer:
         message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_id,
             timestamp=time.time(),
-            limit=int(global_config.chat.max_context_size * 0.5),
+            limit=min(int(global_config.chat.max_context_size * 0.33), 15),
         )
         chat_talking_prompt_half = build_readable_messages(
             message_list_before_now_half,

View File

@@ -77,12 +77,14 @@ class ReplyAction(BaseAction):
         sender, target = self._parse_reply_target(reply_to)
         try:
+            prepared_reply = self.action_data.get("prepared_reply", "")
+            if not prepared_reply:
                 try:
                     success, reply_set, _ = await asyncio.wait_for(
                         generator_api.generate_reply(
                             action_data=self.action_data,
                             chat_id=self.chat_id,
-                            request_type="focus.replyer",
+                            request_type="chat.replyer.focus",
                             enable_tool=global_config.tool.enable_in_focus_chat,
                         ),
                         timeout=global_config.chat.thinking_timeout,
@@ -103,6 +105,8 @@ class ReplyAction(BaseAction):
                 logger.info(
                     f"{self.log_prefix} 从思考到回复,共有{new_message_count}条新消息,{'使用' if need_reply else '不使用'}引用回复"
                 )
+            else:
+                reply_text = prepared_reply
             # 构建回复文本
             reply_text = ""