feat: add default values to the timeout processor
@@ -11,6 +11,7 @@ from datetime import datetime
 from typing import Dict
 from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
+import asyncio
 
 logger = get_logger("processor")
 
@@ -59,7 +60,8 @@ class ChattingInfoProcessor(BaseProcessor):
 
         obs_info = ObsInfo()
 
-        await self.chat_compress(obs)
+        # changed to an async task so it does not block the main flow
+        asyncio.create_task(self.chat_compress(obs))
 
         # set the talking message
         if hasattr(obs, "talking_message_str"):
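Note on the hunk above: asyncio.create_task only schedules the coroutine, and the event loop keeps just a weak reference to the resulting task, so a pure fire-and-forget call can in principle be garbage-collected before it finishes. A minimal sketch of a safer variant, with illustrative names that are not taken from this repository:

import asyncio

_background_tasks: set[asyncio.Task] = set()

def run_in_background(coro) -> asyncio.Task:
    # Schedule the coroutine without awaiting it, so the caller is not blocked.
    task = asyncio.create_task(coro)
    # Keep a strong reference until the task finishes; asyncio only holds weak references.
    _background_tasks.add(task)
    task.add_done_callback(_background_tasks.discard)
    return task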
@@ -95,15 +97,20 @@ class ChattingInfoProcessor(BaseProcessor):
         return processed_infos
 
     async def chat_compress(self, obs: ChattingObservation):
+        log_msg = ""
         if obs.compressor_prompt:
             summary = ""
             try:
                 summary_result, _, _ = await self.model_summary.generate_response(obs.compressor_prompt)
-                summary = "没有主题的闲聊"  # default value
-                if summary_result:  # make sure the result is not empty
+                summary = "没有主题的闲聊"
+                if summary_result:
                     summary = summary_result
             except Exception as e:
-                logger.error(f"总结主题失败 for chat {obs.chat_id}: {e}")
+                log_msg = f"总结主题失败 for chat {obs.chat_id}: {e}"
+                logger.error(log_msg)
+            else:
+                log_msg = f"chat_compress 完成 for chat {obs.chat_id}, summary: {summary}"
+                logger.info(log_msg)
 
             mid_memory = {
                 "id": str(int(datetime.now().timestamp())),
@@ -130,3 +137,5 @@ class ChattingInfoProcessor(BaseProcessor):
             obs.compressor_prompt = ""
             obs.oldest_messages = []
             obs.oldest_messages_str = ""
+
+        return log_msg
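Because chat_compress is now launched with asyncio.create_task instead of being awaited, the log_msg it returns is not read anywhere in these hunks. If a caller wants it, a done callback is one option; a hedged sketch that reuses self, obs and logger from the code above:

task = asyncio.create_task(self.chat_compress(obs))

def _report(t: asyncio.Task) -> None:
    # Retrieving the result or exception here also avoids "Task exception was never retrieved" warnings.
    if t.cancelled():
        return
    if t.exception() is not None:
        logger.error(f"chat_compress failed: {t.exception()}")
    else:
        logger.info(t.result())

task.add_done_callback(_report)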
@@ -122,11 +122,24 @@ class ActionPlanner:
 
 
 
-        # continue processing the remaining info
-        self_info = ""
-        current_mind = ""
+        # set default values
+        nickname_str = ""
+        for nicknames in global_config.bot.alias_names:
+            nickname_str += f"{nicknames},"
+        name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
 
+        personality_block = individuality.get_personality_prompt(x_person=2, level=2)
+        identity_block = individuality.get_identity_prompt(x_person=2, level=2)
 
 
+        self_info = name_block + personality_block + identity_block
+        current_mind = "你思考了很久,没有想清晰要做什么"
+        cycle_info = ""
+        structured_info = ""
+        extra_info = []
+        observed_messages = []
+        observed_messages_str = ""
+        chat_type = "group"
+        is_group_chat = True
         for info in all_plan_info:
             if isinstance(info, ObsInfo):
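The block above pre-fills every planner input with a safe default before all_plan_info is processed, so a missing or timed-out processor still leaves the planner with usable values. A hypothetical illustration of that idea; the run_with_default helper, plan_step and its timeout are assumptions, not code from this diff:

import asyncio

async def run_with_default(coro, default, timeout: float = 10.0):
    # Return the coroutine's result, or fall back to the default on timeout.
    try:
        return await asyncio.wait_for(coro, timeout=timeout)
    except asyncio.TimeoutError:
        return default

# e.g. current_mind = await run_with_default(plan_step(), "你思考了很久,没有想清晰要做什么")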
@@ -48,6 +48,8 @@ class PicAction(PluginAction):
     ):
         super().__init__(action_data, reasoning, cycle_timers, thinking_id, global_config, **kwargs)
 
+        logger.info(f"{self.log_prefix} 开始绘图!原因是:{self.reasoning}")
+
         http_base_url = self.config.get("base_url")
         http_api_key = self.config.get("volcano_generate_api_key")
 
@@ -93,9 +93,9 @@ talk_frequency_down_groups = [] # group IDs where reply frequency is lowered
 [focus_chat] # focused chat
 think_interval = 3 # thinking interval in seconds; effectively reduces consumption
 
-observation_context_size = 15 # maximum observed context length; 15 is recommended, too short or too long both make the bot act silly
-compressed_length = 5 # must not exceed observation_context_size; minimum length for heartflow context compression: context beyond the observed length gets compressed, and the minimum is 5
-compress_length_limit = 5 # maximum number of compressed chunks; compressed context beyond this count is deleted
+observation_context_size = 16 # maximum observed context length; 15 is recommended, too short or too long both make the bot act silly
+compressed_length = 8 # must not exceed observation_context_size; minimum length for heartflow context compression: context beyond the observed length gets compressed, and the minimum is 5
+compress_length_limit = 4 # maximum number of compressed chunks; compressed context beyond this count is deleted
 
 [focus_chat_processor] # focused chat processors; enabling them adds more features but increases token consumption
 self_identify_processor = true # whether to enable the self-identification processor
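The compressed_length comment above states two constraints: it must not exceed observation_context_size, and the minimum compression length is 5. A small hedged check at config-load time would make an inconsistent pair fail fast; the function and argument names here are illustrative, not from this project:

def validate_focus_chat(observation_context_size: int, compressed_length: int) -> None:
    # Enforce the constraints documented in the [focus_chat] comments.
    if compressed_length > observation_context_size:
        raise ValueError("compressed_length must not exceed observation_context_size")
    if compressed_length < 5:
        raise ValueError("compressed_length must be at least 5")

validate_focus_chat(observation_context_size=16, compressed_length=8)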