feat: add max context limits for memory activation and rewriting; trim planner context length
@@ -585,7 +585,7 @@ class HeartFChatting:
             reply_to=reply_to,
             available_actions=available_actions,
             enable_tool=global_config.tool.enable_in_normal_chat,
-            request_type="normal.replyer",
+            request_type="chat.replyer.normal",
         )

         if not success or not reply_set:

@@ -199,7 +199,7 @@ class Hippocampus:
         # Load the memory graph from the database
         self.entorhinal_cortex.sync_memory_from_db()
         # TODO: API-Adapter modification marker
-        self.model_summary = LLMRequest(global_config.model.memory, request_type="memory")
+        self.model_summary = LLMRequest(global_config.model.memory, request_type="memory.builder")

     def get_all_node_names(self) -> list:
         """Return the names of all nodes in the memory graph."""

@@ -66,7 +66,7 @@ class MemoryActivator:
         self.key_words_model = LLMRequest(
             model=global_config.model.utils_small,
             temperature=0.5,
-            request_type="memory_activator",
+            request_type="memory.activator",
         )

         self.running_memory = []

@@ -71,7 +71,7 @@ class ActionModifier:
         message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
             chat_id=self.chat_stream.stream_id,
             timestamp=time.time(),
-            limit=int(global_config.chat.max_context_size * 0.5),
+            limit=min(int(global_config.chat.max_context_size * 0.33), 10),
         )
         chat_content = build_readable_messages(
             message_list_before_now_half,

@@ -224,7 +224,7 @@ class ActionPlanner:
         message_list_before_now = get_raw_msg_before_timestamp_with_chat(
             chat_id=self.chat_id,
             timestamp=time.time(),
-            limit=global_config.chat.max_context_size,
+            limit=int(global_config.chat.max_context_size * 0.6),
         )

         chat_content_block = build_readable_messages(

@@ -280,7 +280,7 @@ class DefaultReplyer:
         # Pick a model config by weighted random selection
         selected_model_config = self._select_weighted_model_config()
         logger.info(
-            f"{self.log_prefix} Rewriting with model config: {selected_model_config.get('model_name', 'N/A')} (weight: {selected_model_config.get('weight', 1.0)})"
+            f"{self.log_prefix} Rewriting with model config: {selected_model_config.get('name', 'N/A')} (weight: {selected_model_config.get('weight', 1.0)})"
         )

         express_model = LLMRequest(

@@ -797,7 +797,7 @@ class DefaultReplyer:
         message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
             chat_id=chat_id,
             timestamp=time.time(),
-            limit=int(global_config.chat.max_context_size * 0.5),
+            limit=min(int(global_config.chat.max_context_size * 0.33), 15),
         )
         chat_talking_prompt_half = build_readable_messages(
             message_list_before_now_half,

@@ -77,32 +77,36 @@ class ReplyAction(BaseAction):
         sender, target = self._parse_reply_target(reply_to)

         try:
-            try:
-                success, reply_set, _ = await asyncio.wait_for(
-                    generator_api.generate_reply(
-                        action_data=self.action_data,
-                        chat_id=self.chat_id,
-                        request_type="focus.replyer",
-                        enable_tool=global_config.tool.enable_in_focus_chat,
-                    ),
-                    timeout=global_config.chat.thinking_timeout,
-                )
-            except asyncio.TimeoutError:
-                logger.warning(f"{self.log_prefix} Reply generation timed out ({global_config.chat.thinking_timeout}s)")
-                return False, "timeout"
-
-            # Check how many new messages have arrived since start_time
-            # Get the action trigger time, or use the default value
-            current_time = time.time()
-            new_message_count = message_api.count_new_messages(
-                chat_id=self.chat_id, start_time=start_time, end_time=current_time
-            )
-
-            # Decide whether to use reply_to based on the number of new messages
-            need_reply = new_message_count >= random.randint(2, 4)
-            logger.info(
-                f"{self.log_prefix} {new_message_count} new messages arrived between thinking and replying; {'quoting' if need_reply else 'not quoting'} the original"
-            )
+            prepared_reply = self.action_data.get("prepared_reply", "")
+            if not prepared_reply:
+                try:
+                    success, reply_set, _ = await asyncio.wait_for(
+                        generator_api.generate_reply(
+                            action_data=self.action_data,
+                            chat_id=self.chat_id,
+                            request_type="chat.replyer.focus",
+                            enable_tool=global_config.tool.enable_in_focus_chat,
+                        ),
+                        timeout=global_config.chat.thinking_timeout,
+                    )
+                except asyncio.TimeoutError:
+                    logger.warning(f"{self.log_prefix} Reply generation timed out ({global_config.chat.thinking_timeout}s)")
+                    return False, "timeout"
+
+                # Check how many new messages have arrived since start_time
+                # Get the action trigger time, or use the default value
+                current_time = time.time()
+                new_message_count = message_api.count_new_messages(
+                    chat_id=self.chat_id, start_time=start_time, end_time=current_time
+                )
+
+                # Decide whether to use reply_to based on the number of new messages
+                need_reply = new_message_count >= random.randint(2, 4)
+                logger.info(
+                    f"{self.log_prefix} {new_message_count} new messages arrived between thinking and replying; {'quoting' if need_reply else 'not quoting'} the original"
+                )
+            else:
+                reply_text = prepared_reply

             # Build the reply text
             reply_text = ""
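The trimming pattern is the same at every call site above: fetch only a fraction of max_context_size and clamp it to a small hard ceiling. A minimal sketch of that behavior in plain Python, with illustrative values; trimmed_limit is a hypothetical helper named here for clarity, the commit itself inlines the expression:

# Sketch only: how min(int(max_context_size * fraction), cap) behaves for a few window sizes.
def trimmed_limit(max_context_size: int, fraction: float = 0.33, cap: int = 10) -> int:
    """Number of messages to fetch: a fraction of the context window, never above cap."""
    return min(int(max_context_size * fraction), cap)

for size in (15, 30, 60, 120):
    print(size, trimmed_limit(size))  # -> 4, 9, 10, 10: large windows are clamped to the cap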