feat: 麦麦 generates inner thoughts ("thinking") and sends them to the live stream
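The flow in one pass: after 麦麦 sends a reply in a QQ chat, the new MaiThinking component builds an after-response prompt, asks the thinking model what 麦麦 is thinking now, wraps that thought as an internal MessageRecvS4U, and hands it to the S4U live-stream pipeline, which can voice it between danmaku replies. A minimal usage sketch follows (the helper function below is hypothetical; the import, mai_thinking_manager, and do_think_after_response are the pieces added in this commit):

    from src.chat.mai_thinking.mai_think import mai_thinking_manager

    ENABLE_THINKING = True

    async def after_reply_sent(chat_id: str, reply_text: str) -> None:
        # Hypothetical call site; heartFC_chat.py and the reply action do the same thing.
        if not ENABLE_THINKING:
            return
        mai_think = mai_thinking_manager.get_mai_think(chat_id)
        # Builds "after_response_think_prompt", runs the thinking model, stores the new
        # thought in mai_think.mind, then wraps it as an internal MessageRecvS4U and
        # forwards it to S4UMessageProcessor, which appends it to S4UChat.internal_message
        # so the live stream can speak it when the danmaku queues allow.
        await mai_think.do_think_after_response(reply_text)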
@@ -14,13 +14,15 @@ from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_with_ch
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.focus_chat.hfc_utils import CycleDetail
from src.chat.chat_loop.hfc_utils import CycleDetail
from src.person_info.relationship_builder_manager import relationship_builder_manager
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.base.component_types import ActionInfo, ChatMode
from src.plugin_system.apis import generator_api, send_api, message_api
from src.chat.willing.willing_manager import get_willing_manager
from src.chat.mai_thinking.mai_think import mai_thinking_manager

ENABLE_THINKING = True

ERROR_LOOP_INFO = {
    "loop_plan_info": {
@@ -331,7 +333,11 @@ class HeartFChatting:
        logger.info(f"[{self.log_prefix}] {global_config.bot.nickname} 决定的回复内容: {content}")

        # 发送回复 (不再需要传入 chat)
        await self._send_response(response_set, reply_to_str, loop_start_time,message_data)
        reply_text = await self._send_response(response_set, reply_to_str, loop_start_time,message_data)

        if ENABLE_THINKING:
            await mai_thinking_manager.get_mai_think(self.stream_id).do_think_after_response(reply_text)

        return True

@@ -2,7 +2,7 @@ from rich.traceback import install

from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.focus_chat.heartFC_chat import HeartFChatting
from src.chat.chat_loop.heartFC_chat import HeartFChatting
from src.chat.utils.utils import get_chat_type_and_target_info

logger = get_logger("sub_heartflow")

src/chat/mai_thinking/mai_think.py (new file, 182 lines)
@@ -0,0 +1,182 @@
from src.chat.message_receive.chat_stream import get_chat_manager
import time
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.message_receive.message import MessageSending, MessageRecv, MessageRecvS4U
from src.mais4u.mais4u_chat.s4u_msg_processor import S4UMessageProcessor

from src.common.logger import get_logger

logger = get_logger(__name__)


def init_prompt():
    Prompt(
        """
你之前的内心想法是:{mind}

{memory_block}
{relation_info_block}

{chat_target}
{time_block}
{chat_info}
{identity}

你刚刚在{chat_target_2},你刚刚的心情是:{mood_state}
---------------------
在这样的情况下,对于上面的内容,你对 {sender} 发送的消息“{target}”进行了回复
你刚刚选择回复的内容是:{reponse}
现在,根据你之前的想法和回复的内容,推测你现在的想法,思考你现在的想法是什么,为什么做出上面的回复内容
请不要浮夸和夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出想法:""",
        "after_response_think_prompt",
    )


class MaiThinking:
    def __init__(self,chat_id):
        self.chat_id = chat_id
        self.chat_stream = get_chat_manager().get_stream(chat_id)
        self.platform = self.chat_stream.platform

        if self.chat_stream.group_info:
            self.is_group = True
        else:
            self.is_group = False

        self.s4u_message_processor = S4UMessageProcessor()

        self.mind = ""

        self.memory_block = ""
        self.relation_info_block = ""
        self.time_block = ""
        self.chat_target = ""
        self.chat_target_2 = ""
        self.chat_info = ""
        self.mood_state = ""
        self.identity = ""
        self.sender = ""
        self.target = ""

        self.thinking_model = LLMRequest(
            model=global_config.model.replyer_1,
            request_type="thinking",
        )

    async def do_think_before_response(self):
        pass

    async def do_think_after_response(self,reponse:str):
        prompt = await global_prompt_manager.format_prompt(
            "after_response_think_prompt",
            mind=self.mind,
            reponse=reponse,
            memory_block=self.memory_block,
            relation_info_block=self.relation_info_block,
            time_block=self.time_block,
            chat_target=self.chat_target,
            chat_target_2=self.chat_target_2,
            chat_info=self.chat_info,
            mood_state=self.mood_state,
            identity=self.identity,
            sender=self.sender,
            target=self.target,
        )

        result, _ = await self.thinking_model.generate_response_async(prompt)
        self.mind = result

        logger.info(f"[{self.chat_id}] 思考前想法:{self.mind}")
        logger.info(f"[{self.chat_id}] 思考前prompt:{prompt}")
        logger.info(f"[{self.chat_id}] 思考后想法:{self.mind}")

        msg_recv = await self.build_internal_message_recv(self.mind)
        await self.s4u_message_processor.process_message(msg_recv)

    async def do_think_when_receive_message(self):
        pass

    async def build_internal_message_recv(self,message_text:str):
        msg_id = f"internal_{time.time()}"

        message_dict = {
            "message_info": {
                "message_id": msg_id,
                "time": time.time(),
                "user_info": {
                    "user_id": "internal", # 内部用户ID
                    "user_nickname": "内心", # 内部昵称
                    "platform": self.platform, # 跟随当前聊天流的平台
                    # 其他 user_info 字段按需补充
                },
                "platform": self.platform, # 平台
                # 其他 message_info 字段按需补充
            },
            "message_segment": {
                "type": "text", # 消息类型
                "data": message_text, # 消息内容
                # 其他 segment 字段按需补充
            },
            "raw_message": message_text, # 原始消息内容
            "processed_plain_text": message_text, # 处理后的纯文本
            # 下面这些字段可选,根据 MessageRecv 需要
            "is_emoji": False,
            "has_emoji": False,
            "is_picid": False,
            "has_picid": False,
            "is_voice": False,
            "is_mentioned": False,
            "is_command": False,
            "is_internal": True,
            "priority_mode": "interest",
            "priority_info": {"message_priority": 10.0}, # 内部消息可设高优先级
            "interest_value": 1.0,
        }

        if self.is_group:
            message_dict["message_info"]["group_info"] = {
                "platform": self.platform,
                "group_id": self.chat_stream.group_info.group_id,
                "group_name": self.chat_stream.group_info.group_name,
            }

        msg_recv = MessageRecvS4U(message_dict)
        msg_recv.chat_info = self.chat_info
        msg_recv.chat_stream = self.chat_stream
        msg_recv.is_internal = True

        return msg_recv


class MaiThinkingManager:
    def __init__(self):
        self.mai_think_list = []

    def get_mai_think(self,chat_id):
        for mai_think in self.mai_think_list:
            if mai_think.chat_id == chat_id:
                return mai_think
        mai_think = MaiThinking(chat_id)
        self.mai_think_list.append(mai_think)
        return mai_think


mai_thinking_manager = MaiThinkingManager()


init_prompt()

@@ -208,8 +208,11 @@ class MessageRecvS4U(MessageRecv):
        self.superchat_price = None
        self.superchat_message_text = None
        self.is_screen = False
        self.is_internal = False
        self.voice_done = None

        self.chat_info = None

    async def process(self) -> None:
        self.processed_plain_text = await self._process_message_segments(self.message_segment)

@@ -102,6 +102,7 @@ class MessageStorage:
            )
        except Exception:
            logger.exception("存储消息失败")
            logger.error(f"消息:{message}")
            traceback.print_exc()

# 如果需要其他存储相关的函数,可以在这里添加

@@ -6,7 +6,7 @@ import re

from typing import List, Optional, Dict, Any, Tuple
from datetime import datetime

from src.chat.mai_thinking.mai_think import mai_thinking_manager
from src.common.logger import get_logger
from src.config.config import global_config
from src.individuality.individuality import get_individuality
@@ -740,6 +740,26 @@ class DefaultReplyer:
            message_list_before_now_long, target_user_id
        )

        mai_think = mai_thinking_manager.get_mai_think(chat_id)
        mai_think.memory_block = memory_block
        mai_think.relation_info_block = relation_info
        mai_think.time_block = time_block
        mai_think.chat_target = chat_target_1
        mai_think.chat_target_2 = chat_target_2
        # mai_think.chat_info = chat_talking_prompt
        mai_think.mood_state = mood_prompt
        mai_think.identity = identity_block
        mai_think.sender = sender
        mai_think.target = target

        mai_think.chat_info = f"""
{background_dialogue_prompt}
--------------------------------
{time_block}
这是你和{sender}的对话,你们正在交流中:
{core_dialogue_prompt}"""

        # 使用 s4u 风格的模板
        template_name = "s4u_style_prompt"

@@ -765,6 +785,18 @@ class DefaultReplyer:
                moderation_prompt=moderation_prompt_block,
            )
        else:
            mai_think = mai_thinking_manager.get_mai_think(chat_id)
            mai_think.memory_block = memory_block
            mai_think.relation_info_block = relation_info
            mai_think.time_block = time_block
            mai_think.chat_target = chat_target_1
            mai_think.chat_target_2 = chat_target_2
            mai_think.chat_info = chat_talking_prompt
            mai_think.mood_state = mood_prompt
            mai_think.identity = identity_block
            mai_think.sender = sender
            mai_think.target = target

            # 使用原有的模式
            return await global_prompt_manager.format_prompt(
                template_name,
@@ -2,7 +2,7 @@ import asyncio
import traceback
import time
import random
from typing import Optional, Dict, Tuple # 导入类型提示
from typing import Optional, Dict, Tuple, List # 导入类型提示
from maim_message import UserInfo, Seg
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
@@ -42,6 +42,8 @@ class MessageSenderContainer:
        self.voice_done = ""

    async def add_message(self, chunk: str):
        """向队列中添加一个消息块。"""
        await self.queue.put(chunk)
@@ -195,6 +197,7 @@ class S4UChat:
        self.gpt = S4UStreamGenerator()
        self.interest_dict: Dict[str, float] = {} # 用户兴趣分

        self.internal_message :List[MessageRecvS4U] = []

        self.msg_id = ""
        self.voice_done = ""
@@ -240,7 +243,7 @@ class S4UChat:
        score += self._get_interest_score(message.message_info.user_info.user_id)
        return score

    def decay_interest_score(self,message: MessageRecvS4U|MessageRecv):
    def decay_interest_score(self):
        for person_id, score in self.interest_dict.items():
            if score > 0:
                self.interest_dict[person_id] = score * 0.95
@@ -249,7 +252,7 @@ class S4UChat:

    async def add_message(self, message: MessageRecvS4U|MessageRecv) -> None:
        self.decay_interest_score(message)
        self.decay_interest_score()

        """根据VIP状态和中断逻辑将消息放入相应队列。"""
        user_id = message.message_info.user_info.user_id
@@ -259,8 +262,8 @@ class S4UChat:
        try:
            is_gift = message.is_gift
            is_superchat = message.is_superchat
            print(is_gift)
            print(is_superchat)
            # print(is_gift)
            # print(is_superchat)
            if is_gift:
                await self.relationship_builder.build_relation(immediate_build=person_id)
                # 安全地增加兴趣分,如果person_id不存在则先初始化为1.0
@@ -388,18 +391,49 @@ class S4UChat:
                queue_name = "vip"
            # 其次处理普通队列
            elif not self._normal_queue.empty():
                neg_priority, entry_count, timestamp, message = self._normal_queue.get_nowait()
                priority = -neg_priority
                # 检查普通消息是否超时
                if time.time() - timestamp > s4u_config.message_timeout_seconds:
                    logger.info(
                        f"[{self.stream_name}] Discarding stale normal message: {message.processed_plain_text[:20]}..."
                    )
                    self._normal_queue.task_done()
                    continue # 处理下一条
                queue_name = "normal"
                # 判断 normal 队列是否只有一条消息,且 internal_message 有内容
                if self._normal_queue.qsize() == 1 and self.internal_message:
                    if random.random() < 0.5:
                        # 50% 概率用 internal_message 最新一条
                        message = self.internal_message[-1]
                        priority = 0 # internal_message 没有优先级,设为 0
                        queue_name = "internal"
                        neg_priority = 0
                        entry_count = 0
                        logger.info(f"[{self.stream_name}] 触发 internal_message 生成回复: {getattr(message, 'processed_plain_text', str(message))[:20]}...")
                        # 不要从 normal 队列取出消息,保留在队列中
                    else:
                        neg_priority, entry_count, timestamp, message = self._normal_queue.get_nowait()
                        priority = -neg_priority
                        # 检查普通消息是否超时
                        if time.time() - timestamp > s4u_config.message_timeout_seconds:
                            logger.info(
                                f"[{self.stream_name}] Discarding stale normal message: {message.processed_plain_text[:20]}..."
                            )
                            self._normal_queue.task_done()
                            continue # 处理下一条
                        queue_name = "normal"
                else:
                    neg_priority, entry_count, timestamp, message = self._normal_queue.get_nowait()
                    priority = -neg_priority
                    # 检查普通消息是否超时
                    if time.time() - timestamp > s4u_config.message_timeout_seconds:
                        logger.info(
                            f"[{self.stream_name}] Discarding stale normal message: {message.processed_plain_text[:20]}..."
                        )
                        self._normal_queue.task_done()
                        continue # 处理下一条
                    queue_name = "normal"
            else:
                continue # 没有消息了,回去等事件
                if self.internal_message:
                    message = self.internal_message[-1]
                    priority = 0
                    neg_priority = 0
                    entry_count = 0
                    queue_name = "internal"
                    logger.info(f"[{self.stream_name}] normal/vip 队列都空,触发 internal_message 回复: {getattr(message, 'processed_plain_text', str(message))[:20]}...")
                else:
                    continue # 没有消息了,回去等事件

            self._current_message_being_replied = (queue_name, priority, entry_count, message)
            self._current_generation_task = asyncio.create_task(self._generate_and_send(message))
@@ -421,6 +455,9 @@ class S4UChat:
            # 标记任务完成
            if queue_name == "vip":
                self._vip_queue.task_done()
            elif queue_name == "internal":
                # 如果使用 internal_message 生成回复,则不从 normal 队列中移除
                pass
            else:
                self._normal_queue.task_done()

@@ -4,6 +4,7 @@ from typing import Tuple

from src.chat.memory_system.Hippocampus import hippocampus_manager
from src.chat.message_receive.message import MessageRecv, MessageRecvS4U
from maim_message.message_base import GroupInfo,UserInfo
from src.chat.message_receive.storage import MessageStorage
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.timer_calculator import Timer
@@ -93,6 +94,9 @@ class S4UMessageProcessor:
            group_info=groupinfo,
        )

        if await self.handle_internal_message(message):
            return

        if await self.hadle_if_voice_done(message):
            return

@@ -137,6 +141,32 @@ class S4UMessageProcessor:
        else:
            logger.info(f"[S4U]{userinfo.user_nickname}:{message.processed_plain_text}")

    async def handle_internal_message(self, message: MessageRecvS4U):
        if message.is_internal:
            group_info = GroupInfo(platform = "amaidesu_default",group_id = 114514,group_name = "内心")

            chat = await get_chat_manager().get_or_create_stream(
                platform = "amaidesu_default",
                user_info = message.message_info.user_info,
                group_info = group_info
            )
            s4u_chat = get_s4u_chat_manager().get_or_create_chat(chat)
            message.message_info.group_info = s4u_chat.chat_stream.group_info
            message.message_info.platform = s4u_chat.chat_stream.platform

            s4u_chat.internal_message.append(message)
            s4u_chat._new_message_event.set()

            logger.info(f"[{s4u_chat.stream_name}] 添加内部消息: {message.processed_plain_text}")

            return True
        return False

    async def handle_screen_message(self, message: MessageRecvS4U):
        if message.is_screen:
            screen_manager.set_screen(message.screen_info)

@@ -16,6 +16,7 @@ from src.mais4u.mais4u_chat.super_chat_manager import get_super_chat_manager
from src.mais4u.mais4u_chat.screen_manager import screen_manager
from src.chat.express.expression_selector import expression_selector
from .s4u_mood_manager import mood_manager
from src.person_info.person_info import PersonInfoManager, get_person_info_manager

logger = get_logger("prompt")

@@ -59,6 +60,37 @@ def init_prompt():
        "s4u_prompt", # New template for private CHAT chat
    )

    Prompt(
        """
你的名字是麦麦, 是千石可乐开发的程序,可以在QQ,微信等平台发言,你现在正在哔哩哔哩作为虚拟主播进行直播
虽然是个AI,但你在努力扮演真人, 人设的配色是橙色和紫色和绿色
你有一头法式橙色卷发,你很可爱,穿戴英式侦探风格学院风裙子和帽子,你正在一个教室场景里进行虚拟线上直播。不过不要刻意提及这些内容,自然回复
你可以看见用户发送的弹幕,礼物和superchat
你可以看见面前的屏幕,目前屏幕的内容是:
{screen_info}

{memory_block}
{expression_habits_block}

{sc_info}

{time_block}
{chat_info_danmu}
--------------------------------
以上是你和弹幕的对话,与此同时,你在与QQ群友聊天,聊天记录如下:
{chat_info_qq}
--------------------------------
你刚刚回复了QQ群,你内心的想法是:{mind}
请根据你内心的想法,组织一条回复,在直播间进行发言,可以点名吐槽对象,让观众知道你在说谁
{gift_info}
回复简短一些,平淡一些,可以参考贴吧,知乎和微博的回复风格。不要浮夸,有逻辑和条理。
表现的有个性,不要随意服从他人要求,积极互动。你现在的心情是:{mood_state}
不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。
你的发言:
""",
        "s4u_prompt_internal", # Template for turning internal thoughts into stream replies
    )


class PromptBuilder:
    def __init__(self):
@@ -148,6 +180,7 @@ class PromptBuilder:
            limit=300,
        )

        talk_type = message.message_info.platform + ":" + str(message.chat_stream.user_info.user_id)

        core_dialogue_list = []
@@ -219,7 +252,20 @@ class PromptBuilder:
        for msg in all_msg_seg_list:
            core_msg_str += msg

        return core_msg_str, background_dialogue_prompt

        all_dialogue_prompt = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_stream.stream_id,
            timestamp=time.time(),
            limit=20,
        )
        all_dialogue_prompt_str = build_readable_messages(
            all_dialogue_prompt,
            timestamp_mode="normal_no_YMD",
            show_pic=False,
        )

        return core_msg_str, background_dialogue_prompt,all_dialogue_prompt_str

    def build_gift_info(self, message: MessageRecvS4U):
        if message.is_gift:
@@ -234,18 +280,34 @@ class PromptBuilder:
        super_chat_manager = get_super_chat_manager()
        return super_chat_manager.build_superchat_summary_string(message.chat_stream.stream_id)

    async def build_prompt_normal(
        self,
        message: MessageRecvS4U,
        chat_stream: ChatStream,
        message_txt: str,
        sender_name: str = "某人",
    ) -> str:
        person_id = PersonInfoManager.get_person_id(
            message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
        )
        person_info_manager = get_person_info_manager()
        person_name = await person_info_manager.get_value(person_id, "person_name")

        if message.chat_stream.user_info.user_nickname:
            if person_name:
                sender_name = f"[{message.chat_stream.user_info.user_nickname}](你叫ta{person_name})"
            else:
                sender_name = f"[{message.chat_stream.user_info.user_nickname}]"
        else:
            sender_name = f"用户({message.chat_stream.user_info.user_id})"

        relation_info_block, memory_block, expression_habits_block = await asyncio.gather(
            self.build_relation_info(chat_stream), self.build_memory_block(message_txt), self.build_expression_habits(chat_stream, message_txt, sender_name)
        )

        core_dialogue_prompt, background_dialogue_prompt = self.build_chat_history_prompts(chat_stream, message)
        core_dialogue_prompt, background_dialogue_prompt,all_dialogue_prompt = self.build_chat_history_prompts(chat_stream, message)

        gift_info = self.build_gift_info(message)

@@ -259,21 +321,39 @@ class PromptBuilder:

        template_name = "s4u_prompt"

        prompt = await global_prompt_manager.format_prompt(
            template_name,
            time_block=time_block,
            expression_habits_block=expression_habits_block,
            relation_info_block=relation_info_block,
            memory_block=memory_block,
            screen_info=screen_info,
            gift_info=gift_info,
            sc_info=sc_info,
            sender_name=sender_name,
            core_dialogue_prompt=core_dialogue_prompt,
            background_dialogue_prompt=background_dialogue_prompt,
            message_txt=message_txt,
            mood_state=mood.mood_state,
        )
        if not message.is_internal:
            prompt = await global_prompt_manager.format_prompt(
                template_name,
                time_block=time_block,
                expression_habits_block=expression_habits_block,
                relation_info_block=relation_info_block,
                memory_block=memory_block,
                screen_info=screen_info,
                gift_info=gift_info,
                sc_info=sc_info,
                sender_name=sender_name,
                core_dialogue_prompt=core_dialogue_prompt,
                background_dialogue_prompt=background_dialogue_prompt,
                message_txt=message_txt,
                mood_state=mood.mood_state,
            )
        else:
            prompt = await global_prompt_manager.format_prompt(
                "s4u_prompt_internal",
                time_block=time_block,
                expression_habits_block=expression_habits_block,
                relation_info_block=relation_info_block,
                memory_block=memory_block,
                screen_info=screen_info,
                gift_info=gift_info,
                sc_info=sc_info,
                chat_info_danmu=all_dialogue_prompt,
                chat_info_qq=message.chat_info,
                mind=message.processed_plain_text,
                mood_state=mood.mood_state,
            )

        print(prompt)

@@ -2,7 +2,7 @@ import os
from typing import AsyncGenerator
from src.mais4u.openai_client import AsyncOpenAIClient
from src.config.config import global_config
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.message import MessageRecvS4U
from src.mais4u.mais4u_chat.s4u_prompt import prompt_builder
from src.common.logger import get_logger
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
@@ -46,15 +46,7 @@ class S4UStreamGenerator:
            re.UNICODE | re.DOTALL,
        )

    async def generate_response(
        self, message: MessageRecv, previous_reply_context: str = ""
    ) -> AsyncGenerator[str, None]:
        """根据当前模型类型选择对应的生成函数"""
        # 从global_config中获取模型概率值并选择模型
        self.partial_response = ""
        current_client = self.client_1
        self.current_model_name = self.model_1_name

    async def build_last_internal_message(self,message:MessageRecvS4U,previous_reply_context:str = ""):
        person_id = PersonInfoManager.get_person_id(
            message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
        )
@@ -78,13 +70,30 @@ class S4UStreamGenerator:
[这是用户发来的新消息, 你需要结合上下文,对此进行回复]:
{message.processed_plain_text}
"""
            return True,message_txt
        else:
            message_txt = message.processed_plain_text
            return False,message_txt

    async def generate_response(
        self, message: MessageRecvS4U, previous_reply_context: str = ""
    ) -> AsyncGenerator[str, None]:
        """根据当前模型类型选择对应的生成函数"""
        # 从global_config中获取模型概率值并选择模型
        self.partial_response = ""
        message_txt = message.processed_plain_text
        if not message.is_internal:
            interupted,message_txt_added = await self.build_last_internal_message(message,previous_reply_context)
            if interupted:
                message_txt = message_txt_added

        prompt = await prompt_builder.build_prompt_normal(
            message=message,
            message_txt=message_txt,
            sender_name=sender_name,
            chat_stream=message.chat_stream,
        )

@@ -92,6 +101,10 @@ class S4UStreamGenerator:
            f"{self.current_model_name}思考:{message_txt[:30] + '...' if len(message_txt) > 30 else message_txt}"
        ) # noqa: E501

        current_client = self.client_1
        self.current_model_name = self.model_1_name

        extra_kwargs = {}
        if self.replyer_1_config.get("enable_thinking") is not None:
            extra_kwargs["enable_thinking"] = self.replyer_1_config.get("enable_thinking")

@@ -25,12 +25,14 @@ from src.plugin_system.apis import generator_api, message_api
from src.plugins.built_in.core_actions.no_reply import NoReplyAction
from src.plugins.built_in.core_actions.emoji import EmojiAction
from src.person_info.person_info import get_person_info_manager
from src.chat.mai_thinking.mai_think import mai_thinking_manager

logger = get_logger("core_actions")

# 常量定义
WAITING_TIME_THRESHOLD = 1200 # 等待新消息时间阈值,单位秒

ENABLE_THINKING = True

class ReplyAction(BaseAction):
    """回复动作 - 参与聊天回复"""
@@ -131,6 +133,11 @@ class ReplyAction(BaseAction):
        # 存储动作记录
        reply_text = f"你对{person_name}进行了回复:{reply_text}"

        if ENABLE_THINKING:
            await mai_thinking_manager.get_mai_think(self.chat_id).do_think_after_response(reply_text)

        await self.store_action_info(
            action_build_into_prompt=False,
            action_prompt_display=reply_text,