Merge branch 'main-fix' of https://github.com/SengokuCola/MaiMBot into refactor

tcmofashi
2025-03-28 12:00:33 +08:00
7 changed files with 194 additions and 84 deletions

View File

@@ -0,0 +1,172 @@
import time
import asyncio
import random
from random import random as random_float
from typing import Dict

from ..config.config import global_config
from .message import MessageSending, MessageThinking, MessageSet, MessageRecv
from .message_base import UserInfo, Seg
from .message_sender import message_manager
from ..moods.moods import MoodManager
from .llm_generator import ResponseGenerator
from src.common.logger import get_module_logger
from src.think_flow_demo.heartflow import subheartflow_manager
from ...common.database import db

logger = get_module_logger("auto_speak")


class AutoSpeakManager:
    def __init__(self):
        self._last_auto_speak_time: Dict[str, float] = {}  # Timestamp of the last autonomous message per chat stream
        self.mood_manager = MoodManager.get_instance()
        self.gpt = ResponseGenerator()  # LLM response generator instance
        self._started = False
        self._check_task = None
        self.db = db

    async def get_chat_info(self, chat_id: str) -> dict:
        """Fetch chat stream info from the database."""
        chat_info = await self.db.chat_streams.find_one({"stream_id": chat_id})
        return chat_info

    async def start_auto_speak_check(self):
        """Start the periodic auto-speak check task."""
        if not self._started:
            self._check_task = asyncio.create_task(self._periodic_check())
            self._started = True
            logger.success("自动发言检查任务已启动")

    async def _periodic_check(self):
        """Periodically check whether an autonomous message should be sent."""
        while True and global_config.enable_think_flow:
            # Collect all active sub-heartflows
            active_subheartflows = []
            for chat_id, subheartflow in subheartflow_manager._subheartflows.items():
                if subheartflow.is_active and subheartflow.current_state.willing > 0:  # only active sub-heartflows with willingness above 0
                    active_subheartflows.append((chat_id, subheartflow))
                    logger.debug(f"发现活跃子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}")

            if not active_subheartflows:
                logger.debug("当前没有活跃的子心流")
                await asyncio.sleep(20)  # wait asynchronously before checking again
                continue

            # Randomly pick one active sub-heartflow
            chat_id, subheartflow = random.choice(active_subheartflows)
            logger.info(f"随机选择子心流 - 聊天ID: {chat_id}, 意愿值: {subheartflow.current_state.willing:.2f}")

            # Decide whether to speak autonomously
            if await self.check_auto_speak(subheartflow):
                logger.info(f"准备自主发言 - 聊天ID: {chat_id}")
                # Generate an autonomous message
                bot_user_info = UserInfo(
                    user_id=global_config.BOT_QQ,
                    user_nickname=global_config.BOT_NICKNAME,
                    platform="qq",  # default to the qq platform
                )
                # Create an empty MessageRecv object as context
                message = MessageRecv({
                    "message_info": {
                        "user_info": {
                            "user_id": chat_id,
                            "user_nickname": "",
                            "platform": "qq"
                        },
                        "group_info": None,
                        "platform": "qq",
                        "time": time.time()
                    },
                    "processed_plain_text": "",
                    "raw_message": "",
                    "is_emoji": False
                })
                await self.generate_auto_speak(
                    subheartflow, message, bot_user_info, message.message_info["user_info"], message.message_info
                )
            else:
                logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}")

            # Check again in 20 seconds
            await asyncio.sleep(20)
            # await asyncio.sleep(5)  # wait 5 seconds before continuing after an error

    async def check_auto_speak(self, subheartflow) -> bool:
        """Check whether the bot should speak autonomously."""
        if not subheartflow:
            return False

        current_time = time.time()
        chat_id = subheartflow.observe_chat_id

        # Look up the time of the last autonomous message
        if chat_id not in self._last_auto_speak_time:
            self._last_auto_speak_time[chat_id] = 0
        last_speak_time = self._last_auto_speak_time.get(chat_id, 0)

        # Do not speak if fewer than 30 seconds have passed since the last autonomous message
        if current_time - last_speak_time < 30:
            logger.debug(f"距离上次发言时间太短 - 聊天ID: {chat_id}, 剩余时间: {30 - (current_time - last_speak_time):.1f}")
            return False

        # Read the current willingness value
        current_willing = subheartflow.current_state.willing
        if current_willing > 0.1 and random_float() < 0.5:
            self._last_auto_speak_time[chat_id] = current_time
            logger.info(f"满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
            return True

        logger.debug(f"不满足自主发言条件 - 聊天ID: {chat_id}, 意愿值: {current_willing:.2f}")
        return False

    async def generate_auto_speak(self, subheartflow, message, bot_user_info: UserInfo, userinfo, messageinfo):
        """Generate and queue the content of an autonomous message."""
        thinking_time_point = round(time.time(), 2)
        think_id = "mt" + str(thinking_time_point)
        thinking_message = MessageThinking(
            message_id=think_id,
            chat_stream=None,  # no chat_stream needed
            bot_user_info=bot_user_info,
            reply=message,
            thinking_start_time=thinking_time_point,
        )
        message_manager.add_message(thinking_message)

        # Generate the autonomous reply content
        response, raw_content = await self.gpt.generate_response(message)
        if response:
            message_set = MessageSet(None, think_id)  # no chat_stream needed
            mark_head = False
            for msg in response:
                message_segment = Seg(type="text", data=msg)
                bot_message = MessageSending(
                    message_id=think_id,
                    chat_stream=None,  # no chat_stream needed
                    bot_user_info=bot_user_info,
                    sender_info=userinfo,
                    message_segment=message_segment,
                    reply=message,
                    is_head=not mark_head,
                    is_emoji=False,
                    thinking_start_time=thinking_time_point,
                )
                if not mark_head:
                    mark_head = True
                message_set.add_message(bot_message)
            message_manager.add_message(message_set)

            # Update mood and relationship
            stance, emotion = await self.gpt._get_emotion_tags(raw_content, message.processed_plain_text)
            self.mood_manager.update_mood_from_emotion(emotion, global_config.mood_intensity_factor)
            return True
        return False


# Global AutoSpeakManager instance
auto_speak_manager = AutoSpeakManager()
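
For context, a minimal usage sketch of how this manager is meant to be driven (the on_bot_startup hook below is illustrative and not part of this commit; only auto_speak_manager and start_auto_speak_check come from the file above):

async def on_bot_startup():
    # Starts the background check task; the _started flag makes repeat calls harmless.
    await auto_speak_manager.start_auto_speak_check()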

View File

@@ -113,16 +113,24 @@ class ChatBot:
             # Compute the activation level from the topic
             topic = ""
-            await self.storage.store_message(message, chat, topic[0] if topic else None)
+            interested_rate = 0
             interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
                 message.processed_plain_text, fast_retrieval=True
             )
+            # interested_rate = 0.1
+            # logger.info(f"对{message.processed_plain_text}的激活度:{interested_rate}")
+            # logger.info(f"\033[1;32m[主题识别]\033[0m 使用{global_config.topic_extract}主题: {topic}")
+            await self.storage.store_message(message, chat, topic[0] if topic else None)

             is_mentioned = is_mentioned_bot_in_message(message)
+            if global_config.enable_think_flow:
+                current_willing_old = willing_manager.get_willing(chat_stream=chat)
+                current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
+                print(f"旧回复意愿:{current_willing_old},新回复意愿:{current_willing_new}")
+                current_willing = (current_willing_old + current_willing_new) / 2
+            else:
+                current_willing = willing_manager.get_willing(chat_stream=chat)
+            willing_manager.set_willing(chat.stream_id, current_willing)
             reply_probability = await willing_manager.change_reply_willing_received(
                 chat_stream=chat,
                 is_mentioned_bot=is_mentioned,

@@ -131,10 +139,6 @@ class ChatBot:
                 interested_rate=interested_rate,
                 sender_id=str(message.message_info.user_info.user_id),
             )
-            current_willing_old = willing_manager.get_willing(chat_stream=chat)
-            current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing - 5) / 4
-            print(f"旧回复意愿:{current_willing_old},新回复意愿:{current_willing_new}")
-            current_willing = (current_willing_old + current_willing_new) / 2
             logger.info(
                 f"[{current_time}][{chat.group_info.group_name if chat.group_info else '私聊'}]"

View File

@@ -1225,7 +1225,7 @@ class Hippocampus:
         total_nodes = len(self.memory_graph.G.nodes())
         # activated_nodes = len(activate_map)
         activation_ratio = total_activation / total_nodes if total_nodes > 0 else 0
-        activation_ratio = activation_ratio*40
+        activation_ratio = activation_ratio*60
         logger.info(f"总激活值: {total_activation:.2f}, 总节点数: {total_nodes}, 激活: {activation_ratio}")
         return activation_ratio
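
The only change in this hunk is the scaling factor, 40 to 60, which raises the reported activation by 50%; a quick worked example with made-up numbers:

total_activation = 0.5
total_nodes = 100
activation_ratio = total_activation / total_nodes   # 0.005
# before this commit: 0.005 * 40 = 0.2; after: 0.005 * 60 = 0.3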

View File

@@ -1,70 +0,0 @@
import asyncio
import os

import aiohttp

from src.common.logger import get_module_logger

logger = get_module_logger("offline_llm")


class LLMModel:
    def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")
        if not self.api_key or not self.base_url:
            raise ValueError("环境变量未正确加载SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
        logger.info(f"API URL: {self.base_url}")  # log the base_url

    async def generate_response_async(self, prompt: str) -> str:
        """Asynchronously generate the model's response for the given prompt."""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.7,
            **self.params,
        }

        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # log the request URL

        max_retries = 3
        base_wait_time = 15

        async with aiohttp.ClientSession() as session:
            for retry in range(max_retries):
                try:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2**retry)  # exponential backoff
                            logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # check other response statuses

                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "没有返回结果", ""
                except Exception as e:
                    if retry < max_retries - 1:  # retries remaining
                        wait_time = base_wait_time * (2**retry)
                        logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"请求失败: {str(e)}")
                        return f"请求失败: {str(e)}", ""

        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

View File

@@ -72,7 +72,7 @@ class ScheduleGenerator:
         self.print_schedule()
         while True:
-            print(self.get_current_num_task(1, True))
+            # print(self.get_current_num_task(1, True))
             current_time = datetime.datetime.now()

View File

@@ -223,7 +223,7 @@ class LLMStatistics:
                 logger.exception("统计数据处理失败")
             # Wait 5 minutes
-            for _ in range(300):  # 5 minutes = 300 seconds
+            for _ in range(30):  # 5 minutes = 300 seconds
                 if not self.running:
                     break
                 time.sleep(1)

View File

@@ -49,6 +49,8 @@ class SubHeartflow:
         self.personality_info = " ".join(global_config.PROMPT_PERSONALITY)

+        self.is_active = False
+
     def assign_observe(self,stream_id):
         self.outer_world = outer_world.get_world_by_stream_id(stream_id)
         self.observe_chat_id = stream_id

@@ -58,8 +60,10 @@ class SubHeartflow:
             current_time = time.time()
             if current_time - self.last_reply_time > 180:  # 3 minutes = 180 seconds
                 # print(f"{self.observe_chat_id}麦麦已经3分钟没有回复了暂时停止思考")
+                self.is_active = False
                 await asyncio.sleep(60)  # check again after 60 seconds
             else:
+                self.is_active = True
                 await self.do_a_thinking()
                 await self.judge_willing()
                 await asyncio.sleep(60)

@@ -88,7 +92,7 @@ class SubHeartflow:
         else:
             related_memory_info = ''
-        print(f"相关记忆:{related_memory_info}")
+        # print(f"相关记忆:{related_memory_info}")
         schedule_info = bot_schedule.get_current_num_task(num = 1,time_info = False)
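
The is_active flag added here is what AutoSpeakManager._periodic_check (first file in this commit) filters on when picking a chat for an autonomous message; a condensed sketch of that selection using the names from the diffs above:

active_subheartflows = [
    (chat_id, flow)
    for chat_id, flow in subheartflow_manager._subheartflows.items()
    if flow.is_active and flow.current_state.willing > 0
]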