better: rework the reply logic so the bot now thinks before replying, remove the reasoning model from reply generation, tidy the heartflow run loop, rework the thinking-time calculation, and add error detection
@@ -106,7 +106,7 @@ class Heartflow:
         self.current_mind = reponse
         logger.info(f"麦麦的总体脑内状态:{self.current_mind}")
         # logger.info("麦麦想了想,当前活动:")
-        await bot_schedule.move_doing(self.current_mind)
+        # await bot_schedule.move_doing(self.current_mind)

         for _, subheartflow in self._subheartflows.items():
             subheartflow.main_heartflow_info = reponse
@@ -52,9 +52,9 @@ class ChattingObservation(Observation):
         new_messages_str = ""
         for msg in new_messages:
             if "detailed_plain_text" in msg:
-                new_messages_str += f"{msg['detailed_plain_text']}\n"
+                new_messages_str += f"{msg['detailed_plain_text']}"

-        print(f"new_messages_str:{new_messages_str}")
+        # print(f"new_messages_str:{new_messages_str}")

         # 将新消息添加到talking_message,同时保持列表长度不超过20条
         self.talking_message.extend(new_messages)

@@ -112,7 +112,7 @@ class ChattingObservation(Observation):
         # 基于已经有的talking_summary,和新的talking_message,生成一个summary
         # print(f"更新聊天总结:{self.talking_summary}")
         prompt = ""
-        prompt = f"你正在参与一个qq群聊的讨论,这个群之前在聊的内容是:{self.observe_info}\n"
+        prompt = f"你正在参与一个qq群聊的讨论,你记得这个群之前在聊的内容是:{self.observe_info}\n"
         prompt += f"现在群里的群友们产生了新的讨论,有了新的发言,具体内容如下:{new_messages_str}\n"
         prompt += """以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,
 以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n"""
@@ -87,13 +87,10 @@ class SubHeartflow:
         self.is_active = True
         self.last_active_time = current_time # 更新最后激活时间

-        observation = self.observations[0]
-        await observation.observe()
-
         self.current_state.update_current_state_info()

-        await self.do_a_thinking()
-        await self.judge_willing()
+        # await self.do_a_thinking()
+        # await self.judge_willing()
         await asyncio.sleep(global_config.sub_heart_flow_update_interval)

         # 检查是否超过10分钟没有激活

@@ -107,7 +104,7 @@ class SubHeartflow:

         observation = self.observations[0]
         chat_observe_info = observation.observe_info
-        print(f"chat_observe_info:{chat_observe_info}")
+        # print(f"chat_observe_info:{chat_observe_info}")

         # 调取记忆
         related_memory = await HippocampusManager.get_instance().get_memory_from_text(
@@ -145,7 +142,56 @@ class SubHeartflow:
         logger.debug(f"prompt:\n{prompt}\n")
         logger.info(f"麦麦的脑内状态:{self.current_mind}")

-    async def do_after_reply(self, reply_content, chat_talking_prompt):
+    async def do_observe(self):
+        observation = self.observations[0]
+        await observation.observe()
+
+    async def do_thinking_before_reply(self, message_txt):
+        current_thinking_info = self.current_mind
+        mood_info = self.current_state.mood
+        mood_info = "你很生气,很愤怒"
+        observation = self.observations[0]
+        chat_observe_info = observation.observe_info
+        # print(f"chat_observe_info:{chat_observe_info}")
+
+        # 调取记忆
+        related_memory = await HippocampusManager.get_instance().get_memory_from_text(
+            text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
+        )
+
+        if related_memory:
+            related_memory_info = ""
+            for memory in related_memory:
+                related_memory_info += memory[1]
+        else:
+            related_memory_info = ""
+
+        # print(f"相关记忆:{related_memory_info}")
+
+        schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)
+
+        prompt = ""
+        # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
+        prompt += f"你{self.personality_info}\n"
+        prompt += f"你刚刚在做的事情是:{schedule_info}\n"
+        if related_memory_info:
+            prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
+        prompt += f"刚刚你的想法是{current_thinking_info}。\n"
+        prompt += "-----------------------------------\n"
+        prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+        prompt += f"你现在{mood_info}\n"
+        prompt += f"你注意到有人刚刚说:{message_txt}\n"
+        prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
+        prompt += "记得结合上述的消息,要记得维持住你的人设,注意自己的名字,关注有人刚刚说的内容,不要思考太多:"
+        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+
+        self.update_current_mind(reponse)
+
+        self.current_mind = reponse
+        logger.debug(f"prompt:\n{prompt}\n")
+        logger.info(f"麦麦的思考前脑内状态:{self.current_mind}")
+
+    async def do_thinking_after_reply(self, reply_content, chat_talking_prompt):
         print("麦麦回复之后脑袋转起来了")
         current_thinking_info = self.current_mind
         mood_info = self.current_state.mood
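The new do_thinking_before_reply builds related_memory_info with an explicit accumulator loop over memory[1]. Assuming get_memory_from_text returns (topic, text) pairs, as that indexing suggests, the retrieve-then-concatenate step condenses to the following sketch (illustrative only, not part of the commit):

# Sketch: same retrieval as above, with the loop folded into a join;
# assumes each memory item is a (topic, text) tuple per the memory[1] indexing.
related_memory = await HippocampusManager.get_instance().get_memory_from_text(
    text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
)
related_memory_info = "".join(m[1] for m in related_memory or [])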
@@ -155,10 +201,10 @@ class SubHeartflow:

         message_new_info = chat_talking_prompt
         reply_info = reply_content
-        schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)
+        # schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)

         prompt = ""
-        prompt += f"你现在正在做的事情是:{schedule_info}\n"
+        # prompt += f"你现在正在做的事情是:{schedule_info}\n"
         prompt += f"你{self.personality_info}\n"
         prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
         prompt += f"刚刚你的想法是{current_thinking_info}。"
@@ -47,6 +47,39 @@ class ChatBot:
         if not self._started:
             self._started = True

+    async def _create_thinking_message(self, message, chat, userinfo, messageinfo):
+        """创建思考消息
+
+        Args:
+            message: 接收到的消息
+            chat: 聊天流对象
+            userinfo: 用户信息对象
+            messageinfo: 消息信息对象
+
+        Returns:
+            str: thinking_id
+        """
+        bot_user_info = UserInfo(
+            user_id=global_config.BOT_QQ,
+            user_nickname=global_config.BOT_NICKNAME,
+            platform=messageinfo.platform,
+        )
+
+        thinking_time_point = round(time.time(), 2)
+        thinking_id = "mt" + str(thinking_time_point)
+        thinking_message = MessageThinking(
+            message_id=thinking_id,
+            chat_stream=chat,
+            bot_user_info=bot_user_info,
+            reply=message,
+            thinking_start_time=thinking_time_point,
+        )
+
+        message_manager.add_message(thinking_message)
+        willing_manager.change_reply_willing_sent(chat)
+
+        return thinking_id
+
     async def message_process(self, message_data: str) -> None:
         """处理转化后的统一格式消息
         1. 过滤消息
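For reference, the thinking_id the new helper returns is just the prefix "mt" plus the creation timestamp, so IDs sort chronologically and the start time is recoverable from the ID itself. With an illustrative timestamp:

import time

thinking_time_point = round(time.time(), 2)    # e.g. 1743302400.25 (illustrative value)
thinking_id = "mt" + str(thinking_time_point)  # -> "mt1743302400.25"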
@@ -56,6 +89,8 @@ class ChatBot:
         5. 更新关系
         6. 更新情绪
         """
+        timing_results = {} # 用于收集所有计时结果
+        response_set = None # 初始化response_set变量

         message = MessageRecv(message_data)
         groupinfo = message.message_info.group_info
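Pre-initializing response_set here matters because the reply branch added further down assigns it only when do_reply fires; without the default, the final summary log that joins response_set would hit a NameError on the no-reply path. A minimal self-contained illustration of the guarded pattern (hypothetical values):

do_reply = False            # set True only when the reply branch fires
response_set = None         # defined on every path
if do_reply:
    response_set = ["..."]  # assigned only on the reply path
response_msg = " ".join(response_set) if response_set else "无回复"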
@@ -75,10 +110,7 @@ class ChatBot:
         # 创建 心流与chat的观察
         heartflow.create_subheartflow(chat.stream_id)

-        timer1 = time.time()
         await message.process()
-        timer2 = time.time()
-        logger.debug(f"2消息处理时间: {timer2 - timer1}秒")

         # 过滤词/正则表达式过滤
         if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(

@@ -94,7 +126,7 @@ class ChatBot:
             message.processed_plain_text, fast_retrieval=True
         )
         timer2 = time.time()
-        logger.debug(f"3记忆激活时间: {timer2 - timer1}秒")
+        timing_results["记忆激活"] = timer2 - timer1

         is_mentioned = is_mentioned_bot_in_message(message)


@@ -118,7 +150,7 @@ class ChatBot:
             sender_id=str(message.message_info.user_info.user_id),
         )
         timer2 = time.time()
-        logger.debug(f"4计算意愿激活时间: {timer2 - timer1}秒")
+        timing_results["意愿激活"] = timer2 - timer1

         # 神秘的消息流数据结构处理
         if chat.group_info:
@@ -138,12 +170,30 @@ class ChatBot:
         if "maimcore_reply_probability_gain" in message.message_info.additional_config.keys():
             reply_probability += message.message_info.additional_config["maimcore_reply_probability_gain"]

+        do_reply = False
         # 开始组织语言
         if random() < reply_probability:
+            do_reply = True
+
             timer1 = time.time()
-            response_set, thinking_id = await self._generate_response_from_message(message, chat, userinfo, messageinfo)
+            thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
             timer2 = time.time()
-            logger.info(f"5生成回复时间: {timer2 - timer1}秒")
+            timing_results["创建思考消息"] = timer2 - timer1
+
+            timer1 = time.time()
+            await heartflow.get_subheartflow(chat.stream_id).do_observe()
+            timer2 = time.time()
+            timing_results["观察"] = timer2 - timer1
+
+            timer1 = time.time()
+            await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(message.processed_plain_text)
+            timer2 = time.time()
+            timing_results["思考前脑内状态"] = timer2 - timer1
+
+            timer1 = time.time()
+            response_set = await self.gpt.generate_response(message)
+            timer2 = time.time()
+            timing_results["生成回复"] = timer2 - timer1

             if not response_set:
                 logger.info("为什么生成回复失败?")
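The repeated timer1/timer2 bookkeeping around each step could be folded into a small context manager; a possible tidy-up, sketched here and not part of the commit:

import time
from contextlib import contextmanager

@contextmanager
def timed(timing_results: dict, step: str):
    # Record the wall-clock duration of the enclosed block under `step`.
    start = time.time()
    try:
        yield
    finally:
        timing_results[step] = time.time() - start

# Usage inside the async flow (a sync `with` body may contain await):
# with timed(timing_results, "观察"):
#     await heartflow.get_subheartflow(chat.stream_id).do_observe()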
@@ -153,56 +203,25 @@ class ChatBot:
             timer1 = time.time()
             await self._send_response_messages(message, chat, response_set, thinking_id)
             timer2 = time.time()
-            logger.info(f"7发送消息时间: {timer2 - timer1}秒")
+            timing_results["发送消息"] = timer2 - timer1

             # 处理表情包
             timer1 = time.time()
             await self._handle_emoji(message, chat, response_set)
             timer2 = time.time()
-            logger.debug(f"8处理表情包时间: {timer2 - timer1}秒")
+            timing_results["处理表情包"] = timer2 - timer1

             timer1 = time.time()
             await self._update_using_response(message, response_set)
             timer2 = time.time()
-            logger.info(f"6更新htfl时间: {timer2 - timer1}秒")
+            timing_results["更新心流"] = timer2 - timer1

-        # 更新情绪和关系
-        # await self._update_emotion_and_relationship(message, chat, response_set)
-
-    async def _generate_response_from_message(self, message, chat, userinfo, messageinfo):
-        """生成回复内容
-
-        Args:
-            message: 接收到的消息
-            chat: 聊天流对象
-            userinfo: 用户信息对象
-            messageinfo: 消息信息对象
-
-        Returns:
-            tuple: (response, raw_content) 回复内容和原始内容
-        """
-        bot_user_info = UserInfo(
-            user_id=global_config.BOT_QQ,
-            user_nickname=global_config.BOT_NICKNAME,
-            platform=messageinfo.platform,
-        )
-
-        thinking_time_point = round(time.time(), 2)
-        thinking_id = "mt" + str(thinking_time_point)
-        thinking_message = MessageThinking(
-            message_id=thinking_id,
-            chat_stream=chat,
-            bot_user_info=bot_user_info,
-            reply=message,
-            thinking_start_time=thinking_time_point,
-        )
-
-        message_manager.add_message(thinking_message)
-        willing_manager.change_reply_willing_sent(chat)
-
-        response_set = await self.gpt.generate_response(message)
-
-        return response_set, thinking_id
+        # 在最后统一输出所有计时结果
+        if do_reply:
+            timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
+            trigger_msg = message.processed_plain_text
+            response_msg = " ".join(response_set) if response_set else "无回复"
+            logger.info(f"触发消息: {trigger_msg[:20]}... | 生成消息: {response_msg[:20]}... | 性能计时: {timing_str}")

     async def _update_using_response(self, message, response_set):
         # 更新心流状态
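With hypothetical durations, the consolidated summary line assembles like this:

timing_results = {"记忆激活": 0.32, "意愿激活": 0.05, "生成回复": 3.41}  # hypothetical values
timing_str = " | ".join([f"{step}: {duration:.2f}秒" for step, duration in timing_results.items()])
# timing_str == "记忆激活: 0.32秒 | 意愿激活: 0.05秒 | 生成回复: 3.41秒"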
@@ -213,7 +232,7 @@ class ChatBot:
             stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
         )

-        await heartflow.get_subheartflow(stream_id).do_after_reply(response_set, chat_talking_prompt)
+        await heartflow.get_subheartflow(stream_id).do_thinking_after_reply(response_set, chat_talking_prompt)

     async def _send_response_messages(self, message, chat, response_set, thinking_id):
         container = message_manager.get_container(chat.stream_id)
@@ -30,7 +30,7 @@ class ResponseGenerator:
             request_type="response",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.7, max_tokens=3000, request_type="response"
+            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response"
         )

         self.model_sum = LLM_request(

@@ -42,20 +42,26 @@ class ResponseGenerator:
     async def generate_response(self, message: MessageThinking) -> Optional[Union[str, List[str]]]:
         """根据当前模型类型选择对应的生成函数"""
         # 从global_config中获取模型概率值并选择模型
-        if random.random() < global_config.MODEL_R1_PROBABILITY:
-            self.current_model_type = "深深地"
-            current_model = self.model_reasoning
-        else:
-            self.current_model_type = "浅浅的"
-            current_model = self.model_normal
+        # if random.random() < global_config.MODEL_R1_PROBABILITY:
+        #     self.current_model_type = "深深地"
+        #     current_model = self.model_reasoning
+        # else:
+        #     self.current_model_type = "浅浅的"
+        #     current_model = self.model_normal

+        # logger.info(
+        #     f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
+        # ) # noqa: E501
+
         logger.info(
-            f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
-        ) # noqa: E501
+            f"思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
+        )

+        current_model = self.model_normal
         model_response = await self._generate_response_with_model(message, current_model)

-        print(f"raw_content: {model_response}")
+        # print(f"raw_content: {model_response}")

         if model_response:
             logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response}")

@@ -126,8 +132,6 @@ class ResponseGenerator:
                 "user": sender_name,
                 "message": message.processed_plain_text,
                 "model": self.current_model_name,
-                # 'reasoning_check': reasoning_content_check,
-                # 'response_check': content_check,
                 "reasoning": reasoning_content,
                 "response": content,
                 "prompt": prompt,
@@ -188,11 +188,11 @@ class MessageManager:
                 # print(message_earliest.is_head)
                 # print(message_earliest.update_thinking_time())
                 # print(message_earliest.is_private_message())
-                # thinking_time = message_earliest.update_thinking_time()
-                # print(thinking_time)
+                thinking_time = message_earliest.update_thinking_time()
+                print(thinking_time)
                 if (
                     message_earliest.is_head
-                    and message_earliest.update_thinking_time() > 50
+                    and message_earliest.update_thinking_time() > 8
                     and not message_earliest.is_private_message() # 避免在私聊时插入reply
                 ):
                     logger.debug(f"设置回复消息{message_earliest.processed_plain_text}")

@@ -215,11 +215,11 @@ class MessageManager:

                 try:
                     # print(msg.is_head)
-                    # print(msg.update_thinking_time())
+                    print(msg.update_thinking_time())
                     # print(msg.is_private_message())
                     if (
                         msg.is_head
-                        and msg.update_thinking_time() > 50
+                        and msg.update_thinking_time() > 8
                         and not msg.is_private_message() # 避免在私聊时插入reply
                     ):
                         logger.debug(f"设置回复消息{msg.processed_plain_text}")
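Both reply-insertion thresholds drop from 50 to 8 seconds, matching the commit message's reworked thinking-time accounting, and the elapsed time is now actually recomputed rather than left commented out. The diff does not show update_thinking_time itself; presumably it returns the seconds elapsed since the thinking message was created, along these lines (a guess, not from this commit):

import time

class MessageThinking:
    # Hypothetical sketch, consistent with the thinking_start_time field
    # used elsewhere in this commit; the real method lives outside this diff.
    def __init__(self, thinking_start_time: float, **kwargs):
        self.thinking_start_time = thinking_start_time
        self.thinking_time = 0.0

    def update_thinking_time(self) -> float:
        self.thinking_time = round(time.time() - self.thinking_start_time, 2)
        return self.thinking_time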
@@ -24,27 +24,9 @@ class PromptBuilder:
     async def _build_prompt(
         self, chat_stream, message_txt: str, sender_name: str = "某人", stream_id: Optional[int] = None
     ) -> tuple[str, str]:
-        # 关系(载入当前聊天记录里部分人的关系)
-        # who_chat_in_group = [chat_stream]
-        # who_chat_in_group += get_recent_group_speaker(
-        #     stream_id,
-        #     (chat_stream.user_info.user_id, chat_stream.user_info.platform),
-        #     limit=global_config.MAX_CONTEXT_SIZE,
-        # )
-
-        # outer_world_info = outer_world.outer_world_info
-
         current_mind_info = heartflow.get_subheartflow(stream_id).current_mind

-        # relation_prompt = ""
-        # for person in who_chat_in_group:
-        #     relation_prompt += relationship_manager.build_relationship_info(person)
-
-        # relation_prompt_all = (
-        #     f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
-        #     f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
-        # )
-
         # 开始构建prompt

         # 心情

@@ -71,25 +53,6 @@ class PromptBuilder:
         chat_talking_prompt = chat_talking_prompt
         # print(f"\033[1;34m[调试]\033[0m 已从数据库获取群 {group_id} 的消息记录:{chat_talking_prompt}")

-        # 使用新的记忆获取方法
-        memory_prompt = ""
-        start_time = time.time()
-
-        # 调用 hippocampus 的 get_relevant_memories 方法
-        relevant_memories = await HippocampusManager.get_instance().get_memory_from_text(
-            text=message_txt, max_memory_num=3, max_memory_length=2, max_depth=2, fast_retrieval=False
-        )
-        memory_str = ""
-        for _topic, memories in relevant_memories:
-            memory_str += f"{memories}\n"
-
-        if relevant_memories:
-            # 格式化记忆内容
-            memory_prompt = f"你回忆起:\n{memory_str}\n"
-
-        end_time = time.time()
-        logger.info(f"回忆耗时: {(end_time - start_time):.3f}秒")
-
         # 类型
         if chat_in_group:
             chat_target = "你正在qq群里聊天,下面是群里在聊的内容:"

@@ -146,19 +109,18 @@ class PromptBuilder:
         涉及政治敏感以及违法违规的内容请规避。"""

         logger.info("开始构建prompt")

         prompt = f"""
 {prompt_info}
-{memory_prompt}
-你刚刚脑子里在想:
-{current_mind_info}
-
 {chat_target}
 {chat_talking_prompt}
-现在"{sender_name}"说的:{message_txt}。引起了你的注意,{mood_prompt}\n
+你刚刚脑子里在想:
+{current_mind_info}
+现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality}。
 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
 尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
-请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,
+请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
 请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
 {moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。"""
@@ -32,7 +32,7 @@ class ImageManager:
             self._ensure_description_collection()
             self._ensure_image_dir()
             self._initialized = True
-            self._llm = LLM_request(model=global_config.vlm, temperature=0.4, max_tokens=1000, request_type="image")
+            self._llm = LLM_request(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")

     def _ensure_image_dir(self):
         """确保图像存储目录存在"""

@@ -171,7 +171,7 @@ class ImageManager:

         # 调用AI获取描述
         prompt = (
-            "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来。并尝试猜测这个图片的含义。最多200个字。"
+            "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来。并尝试猜测这个图片的含义。最多100个字。"
         )
         description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)

@@ -231,7 +231,7 @@ class BotConfig:

     # 模型配置
     llm_reasoning: Dict[str, str] = field(default_factory=lambda: {})
-    llm_reasoning_minor: Dict[str, str] = field(default_factory=lambda: {})
+    # llm_reasoning_minor: Dict[str, str] = field(default_factory=lambda: {})
     llm_normal: Dict[str, str] = field(default_factory=lambda: {})
     llm_topic_judge: Dict[str, str] = field(default_factory=lambda: {})
     llm_summary_by_topic: Dict[str, str] = field(default_factory=lambda: {})

@@ -370,9 +370,9 @@ class BotConfig:
             response_config = parent["response"]
             config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
             config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
-            config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
-                "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
-            )
+            # config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
+            #     "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
+            # )
             config.max_response_length = response_config.get("max_response_length", config.max_response_length)

         def willing(parent: dict):

@@ -397,7 +397,7 @@ class BotConfig:

         config_list = [
             "llm_reasoning",
-            "llm_reasoning_minor",
+            # "llm_reasoning_minor",
             "llm_normal",
             "llm_topic_judge",
             "llm_summary_by_topic",
@@ -697,6 +697,11 @@ class ParahippocampalGyrus:
         start_time = time.time()
         logger.info("[遗忘] 开始检查数据库...")

+        # 验证百分比参数
+        if not 0 <= percentage <= 1:
+            logger.warning(f"[遗忘] 无效的遗忘百分比: {percentage}, 使用默认值 0.005")
+            percentage = 0.005
+
         all_nodes = list(self.memory_graph.G.nodes())
         all_edges = list(self.memory_graph.G.edges())


@@ -704,11 +709,21 @@ class ParahippocampalGyrus:
             logger.info("[遗忘] 记忆图为空,无需进行遗忘操作")
             return

-        check_nodes_count = max(1, int(len(all_nodes) * percentage))
-        check_edges_count = max(1, int(len(all_edges) * percentage))
+        # 确保至少检查1个节点和边,且不超过总数
+        check_nodes_count = max(1, min(len(all_nodes), int(len(all_nodes) * percentage)))
+        check_edges_count = max(1, min(len(all_edges), int(len(all_edges) * percentage)))

-        nodes_to_check = random.sample(all_nodes, check_nodes_count)
-        edges_to_check = random.sample(all_edges, check_edges_count)
+        # 只有在有足够的节点和边时才进行采样
+        if len(all_nodes) >= check_nodes_count and len(all_edges) >= check_edges_count:
+            try:
+                nodes_to_check = random.sample(all_nodes, check_nodes_count)
+                edges_to_check = random.sample(all_edges, check_edges_count)
+            except ValueError as e:
+                logger.error(f"[遗忘] 采样错误: {str(e)}")
+                return
+        else:
+            logger.info("[遗忘] 没有足够的节点或边进行遗忘操作")
+            return

         # 使用列表存储变化信息
         edge_changes = {
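The new guard is belt-and-braces: once the counts are clamped with min(len(...), ...), random.sample can no longer raise ValueError, since it only does so when the sample size exceeds the population. The core pattern in isolation (a sketch, not the project's API):

import random

def safe_fraction_sample(population: list, fraction: float) -> list:
    # Clamped sampling as adopted above: k is at least 1 but never exceeds
    # the population size, so random.sample cannot raise ValueError.
    if not population:
        return []
    k = max(1, min(len(population), int(len(population) * fraction)))
    return random.sample(population, k)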
@@ -58,8 +58,18 @@ class MemoryBuildScheduler:
             weight2 (float): 第二个分布的权重
             total_samples (int): 要生成的总时间点数量
         """
+        # 验证参数
+        if total_samples <= 0:
+            raise ValueError("total_samples 必须大于0")
+        if weight1 < 0 or weight2 < 0:
+            raise ValueError("权重必须为非负数")
+        if std_hours1 < 0 or std_hours2 < 0:
+            raise ValueError("标准差必须为非负数")
+
         # 归一化权重
         total_weight = weight1 + weight2
+        if total_weight == 0:
+            raise ValueError("权重总和不能为0")
         self.weight1 = weight1 / total_weight
         self.weight2 = weight2 / total_weight


@@ -73,12 +83,11 @@ class MemoryBuildScheduler:
     def generate_time_samples(self):
         """生成混合分布的时间采样点"""
         # 根据权重计算每个分布的样本数
-        samples1 = int(self.total_samples * self.weight1)
-        samples2 = self.total_samples - samples1
+        samples1 = max(1, int(self.total_samples * self.weight1))
+        samples2 = max(1, self.total_samples - samples1) # 确保 samples2 至少为1

         # 生成两个正态分布的小时偏移
         hours_offset1 = np.random.normal(loc=self.n_hours1, scale=self.std_hours1, size=samples1)
-
         hours_offset2 = np.random.normal(loc=self.n_hours2, scale=self.std_hours2, size=samples2)

         # 合并两个分布的偏移
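Taken together, the constructor checks and the max(1, ...) floors guarantee both Gaussian components contribute at least one sample. A self-contained sketch of the mixture being drawn (the helper function is illustrative; parameter names mirror the class fields):

import numpy as np

def mixture_hour_offsets(n_hours1, std_hours1, n_hours2, std_hours2,
                         weight1, weight2, total_samples):
    # Validation mirroring the constructor checks in the diff.
    if total_samples <= 0 or weight1 < 0 or weight2 < 0 or std_hours1 < 0 or std_hours2 < 0:
        raise ValueError("invalid mixture parameters")
    total_weight = weight1 + weight2
    if total_weight == 0:
        raise ValueError("weights must not both be zero")
    w1 = weight1 / total_weight
    # Each component contributes at least one sample.
    samples1 = max(1, int(total_samples * w1))
    samples2 = max(1, total_samples - samples1)
    return np.concatenate([
        np.random.normal(loc=n_hours1, scale=std_hours1, size=samples1),
        np.random.normal(loc=n_hours2, scale=std_hours2, size=samples2),
    ])

One corner case the floors introduce: when weight1 rounds to the whole budget, samples1 == total_samples and samples2 is still forced to 1, so the generator can emit total_samples + 1 time points.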
@@ -285,39 +285,46 @@ class LLM_request:
         usage = None # 初始化usage变量,避免未定义错误

         async for line_bytes in response.content:
-            line = line_bytes.decode("utf-8").strip()
-            if not line:
-                continue
-            if line.startswith("data:"):
-                data_str = line[5:].strip()
-                if data_str == "[DONE]":
-                    break
-                try:
-                    chunk = json.loads(data_str)
-                    if flag_delta_content_finished:
-                        chunk_usage = chunk.get("usage", None)
-                        if chunk_usage:
-                            usage = chunk_usage # 获取token用量
-                    else:
-                        delta = chunk["choices"][0]["delta"]
-                        delta_content = delta.get("content")
-                        if delta_content is None:
-                            delta_content = ""
-                        accumulated_content += delta_content
-                        # 检测流式输出文本是否结束
-                        finish_reason = chunk["choices"][0].get("finish_reason")
-                        if delta.get("reasoning_content", None):
-                            reasoning_content += delta["reasoning_content"]
-                        if finish_reason == "stop":
-                            chunk_usage = chunk.get("usage", None)
-                            if chunk_usage:
-                                usage = chunk_usage
-                                break
-                            # 部分平台在文本输出结束前不会返回token用量,此时需要再获取一次chunk
-                            flag_delta_content_finished = True
-
-                except Exception as e:
-                    logger.exception(f"解析流式输出错误: {str(e)}")
+            try:
+                line = line_bytes.decode("utf-8").strip()
+                if not line:
+                    continue
+                if line.startswith("data:"):
+                    data_str = line[5:].strip()
+                    if data_str == "[DONE]":
+                        break
+                    try:
+                        chunk = json.loads(data_str)
+                        if flag_delta_content_finished:
+                            chunk_usage = chunk.get("usage", None)
+                            if chunk_usage:
+                                usage = chunk_usage # 获取token用量
+                        else:
+                            delta = chunk["choices"][0]["delta"]
+                            delta_content = delta.get("content")
+                            if delta_content is None:
+                                delta_content = ""
+                            accumulated_content += delta_content
+                            # 检测流式输出文本是否结束
+                            finish_reason = chunk["choices"][0].get("finish_reason")
+                            if delta.get("reasoning_content", None):
+                                reasoning_content += delta["reasoning_content"]
+                            if finish_reason == "stop":
+                                chunk_usage = chunk.get("usage", None)
+                                if chunk_usage:
+                                    usage = chunk_usage
+                                    break
+                                # 部分平台在文本输出结束前不会返回token用量,此时需要再获取一次chunk
+                                flag_delta_content_finished = True

+                    except Exception as e:
+                        logger.exception(f"解析流式输出错误: {str(e)}")
+            except GeneratorExit:
+                logger.warning("流式输出被中断")
+                break
+            except Exception as e:
+                logger.error(f"处理流式输出时发生错误: {str(e)}")
+                break
         content = accumulated_content
         think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
         if think_match:
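The reshaped loop wraps each line in its own try block so one bad chunk cannot kill the stream, catches GeneratorExit separately so a cancelled consumer exits cleanly, and keeps reading after finish_reason == "stop" for platforms that deliver token usage in a trailing chunk. Distilled to its control flow (a sketch under those assumptions; the reasoning_content handling is omitted):

import json

async def read_sse_stream(response):
    accumulated, usage, awaiting_usage = "", None, False
    async for line_bytes in response.content:
        try:
            line = line_bytes.decode("utf-8").strip()
            if not line or not line.startswith("data:"):
                continue
            data_str = line[5:].strip()
            if data_str == "[DONE]":
                break
            try:
                chunk = json.loads(data_str)
            except json.JSONDecodeError:
                continue  # skip one malformed chunk, keep streaming
            if awaiting_usage:
                usage = chunk.get("usage", usage)  # usage arrived one chunk late
                break
            choice = chunk["choices"][0]
            accumulated += choice["delta"].get("content") or ""
            if choice.get("finish_reason") == "stop":
                usage = chunk.get("usage")
                if usage:
                    break
                awaiting_usage = True  # some platforms send usage one chunk later
        except GeneratorExit:
            break  # the consumer closed the stream mid-flight
        except Exception:
            break  # unexpected failure: stop reading rather than spin
    return accumulated, usage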
@@ -176,21 +176,27 @@ class ScheduleGenerator:
             logger.warning(f"未找到{today_str}的日程记录")

     async def move_doing(self, mind_thinking: str = ""):
-        current_time = datetime.datetime.now()
-        if mind_thinking:
-            doing_prompt = self.construct_doing_prompt(current_time, mind_thinking)
-        else:
-            doing_prompt = self.construct_doing_prompt(current_time)
-
-        # print(doing_prompt)
-        doing_response, _ = await self.llm_scheduler_doing.generate_response_async(doing_prompt)
-        self.today_done_list.append((current_time, doing_response))
-
-        await self.update_today_done_list()
-
-        logger.info(f"当前活动: {doing_response}")
-
-        return doing_response
+        try:
+            current_time = datetime.datetime.now()
+            if mind_thinking:
+                doing_prompt = self.construct_doing_prompt(current_time, mind_thinking)
+            else:
+                doing_prompt = self.construct_doing_prompt(current_time)

+            doing_response, _ = await self.llm_scheduler_doing.generate_response_async(doing_prompt)
+            self.today_done_list.append((current_time, doing_response))

+            await self.update_today_done_list()

+            logger.info(f"当前活动: {doing_response}")

+            return doing_response
+        except GeneratorExit:
+            logger.warning("日程生成被中断")
+            return "日程生成被中断"
+        except Exception as e:
+            logger.error(f"生成日程时发生错误: {str(e)}")
+            return "生成日程时发生错误"

     async def get_task_from_time_to_time(self, start_time: str, end_time: str):
         """获取指定时间范围内的任务列表
@@ -1,5 +1,5 @@
 [inner]
-version = "1.0.2"
+version = "1.0.3"


 #以下是给开发人员阅读的,一般用户不需要阅读

@@ -53,7 +53,7 @@ schedule_temperature = 0.5 # 日程表温度,建议0.5-1.0
 nonebot-qq="http://127.0.0.1:18002/api/message"

 [heartflow] # 注意:可能会消耗大量token,请谨慎开启
-enable = false
+enable = false #该选项未启用
 sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
 sub_heart_flow_freeze_time = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
 sub_heart_flow_stop_time = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒

@@ -63,9 +63,9 @@ heart_flow_update_interval = 300 # 心流更新频率,间隔 单位秒


 [message]
-max_context_size = 15 # 麦麦获得的上文数量,建议15,太短太长都会导致脑袋尖尖
+max_context_size = 12 # 麦麦获得的上文数量,建议12,太短太长都会导致脑袋尖尖
 emoji_chance = 0.2 # 麦麦使用表情包的概率
-thinking_timeout = 120 # 麦麦最长思考时间,超过这个时间的思考会放弃
+thinking_timeout = 60 # 麦麦最长思考时间,超过这个时间的思考会放弃
 max_response_length = 256 # 麦麦回答的最大token数
 ban_words = [
     # "403","张三"

@@ -87,10 +87,9 @@ response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听
 down_frequency_rate = 3 # 降低回复频率的群组回复意愿降低系数 除法
 emoji_response_penalty = 0.1 # 表情包回复惩罚系数,设为0为不回复单个表情包,减少单独回复表情包的概率

-[response]
-model_r1_probability = 0.8 # 麦麦回答时选择主要回复模型1 模型的概率
-model_v3_probability = 0.1 # 麦麦回答时选择次要回复模型2 模型的概率
-model_r1_distill_probability = 0.1 # 麦麦回答时选择次要回复模型3 模型的概率
+[response] #这些选项已无效
+model_r1_probability = 0 # 麦麦回答时选择主要回复模型1 模型的概率
+model_v3_probability = 1.0 # 麦麦回答时选择次要回复模型2 模型的概率

 [emoji]
 check_interval = 15 # 检查破损表情包的时间间隔(分钟)

@@ -159,22 +158,16 @@ enable_friend_chat = false # 是否启用好友聊天
 # stream = <true|false> : 用于指定模型是否是使用流式输出
 # 如果不指定,则该项是 False

-[model.llm_reasoning] #回复模型1 主要回复模型
+[model.llm_reasoning] #暂时未使用
 name = "Pro/deepseek-ai/DeepSeek-R1"
 # name = "Qwen/QwQ-32B"
 provider = "SILICONFLOW"
 pri_in = 4 #模型的输入价格(非必填,可以记录消耗)
 pri_out = 16 #模型的输出价格(非必填,可以记录消耗)

-[model.llm_reasoning_minor] #回复模型3 次要回复模型
-name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
-provider = "SILICONFLOW"
-pri_in = 1.26 #模型的输入价格(非必填,可以记录消耗)
-pri_out = 1.26 #模型的输出价格(非必填,可以记录消耗)
-
 #非推理模型

-[model.llm_normal] #V3 回复模型2 次要回复模型
+[model.llm_normal] #V3 回复模型1 主要回复模型
 name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
 pri_in = 2 #模型的输入价格(非必填,可以记录消耗)