fix: correct several s4u issues and fix expression sharing that had stopped working
@@ -81,32 +81,70 @@ class ExpressionSelector:
             request_type="expression.selector",
         )
 
+    @staticmethod
+    def _parse_stream_config_to_chat_id(stream_config_str: str) -> Optional[str]:
+        """解析'platform:id:type'为chat_id(与get_stream_id一致)"""
+        try:
+            parts = stream_config_str.split(":")
+            if len(parts) != 3:
+                return None
+            platform = parts[0]
+            id_str = parts[1]
+            stream_type = parts[2]
+            is_group = stream_type == "group"
+            import hashlib
+            if is_group:
+                components = [platform, str(id_str)]
+            else:
+                components = [platform, str(id_str), "private"]
+            key = "_".join(components)
+            return hashlib.md5(key.encode()).hexdigest()
+        except Exception:
+            return None
+
+    def get_related_chat_ids(self, chat_id: str) -> List[str]:
+        """根据expression_groups配置,获取与当前chat_id相关的所有chat_id(包括自身)"""
+        groups = global_config.expression.expression_groups
+        for group in groups:
+            group_chat_ids = []
+            for stream_config_str in group:
+                chat_id_candidate = self._parse_stream_config_to_chat_id(stream_config_str)
+                if chat_id_candidate:
+                    group_chat_ids.append(chat_id_candidate)
+            if chat_id in group_chat_ids:
+                return group_chat_ids
+        return [chat_id]
+
     def get_random_expressions(
         self, chat_id: str, total_num: int, style_percentage: float, grammar_percentage: float
     ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
-        # 直接数据库查询
-        style_query = Expression.select().where((Expression.chat_id == chat_id) & (Expression.type == "style"))
-        grammar_query = Expression.select().where((Expression.chat_id == chat_id) & (Expression.type == "grammar"))
-        style_exprs = [
-            {
-                "situation": expr.situation,
-                "style": expr.style,
-                "count": expr.count,
-                "last_active_time": expr.last_active_time,
-                "source_id": chat_id,
-                "type": "style"
-            } for expr in style_query
-        ]
-        grammar_exprs = [
-            {
-                "situation": expr.situation,
-                "style": expr.style,
-                "count": expr.count,
-                "last_active_time": expr.last_active_time,
-                "source_id": chat_id,
-                "type": "grammar"
-            } for expr in grammar_query
-        ]
+        # 支持多chat_id合并抽选
+        related_chat_ids = self.get_related_chat_ids(chat_id)
+        style_exprs = []
+        grammar_exprs = []
+        for cid in related_chat_ids:
+            style_query = Expression.select().where((Expression.chat_id == cid) & (Expression.type == "style"))
+            grammar_query = Expression.select().where((Expression.chat_id == cid) & (Expression.type == "grammar"))
+            style_exprs.extend([
+                {
+                    "situation": expr.situation,
+                    "style": expr.style,
+                    "count": expr.count,
+                    "last_active_time": expr.last_active_time,
+                    "source_id": cid,
+                    "type": "style"
+                } for expr in style_query
+            ])
+            grammar_exprs.extend([
+                {
+                    "situation": expr.situation,
+                    "style": expr.style,
+                    "count": expr.count,
+                    "last_active_time": expr.last_active_time,
+                    "source_id": cid,
+                    "type": "grammar"
+                } for expr in grammar_query
+            ])
 
         style_num = int(total_num * style_percentage)
         grammar_num = int(total_num * grammar_percentage)
         # 按权重抽样(使用count作为权重)
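The two new methods above are what restores expression sharing: each entry of an expression_groups group is written as "platform:id:type", hashed into the same md5 chat_id that get_stream_id produces, and get_random_expressions then pools expressions from every chat in the matching group. A minimal standalone sketch of that resolution; the helper names, the "qq" platform and the ids below are illustrative, not taken from the repo's config:

    import hashlib
    from typing import List, Optional

    def parse_stream_config_to_chat_id(stream_config_str: str) -> Optional[str]:
        # "platform:id:type" -> md5 chat_id, mirroring the helper in the hunk above
        parts = stream_config_str.split(":")
        if len(parts) != 3:
            return None
        platform, id_str, stream_type = parts
        # group streams hash "platform_id"; any other type appends "private"
        components = [platform, id_str] if stream_type == "group" else [platform, id_str, "private"]
        return hashlib.md5("_".join(components).encode()).hexdigest()

    def related_chat_ids(chat_id: str, expression_groups: List[List[str]]) -> List[str]:
        # every chat_id in the first group containing chat_id, otherwise just itself
        for group in expression_groups:
            ids = [cid for cid in (parse_stream_config_to_chat_id(s) for s in group) if cid]
            if chat_id in ids:
                return ids
        return [chat_id]

    # hypothetical group sharing expressions between one group chat and one private chat
    groups = [["qq:12345678:group", "qq:87654321:private"]]
    gid = parse_stream_config_to_chat_id("qq:12345678:group")
    print(related_chat_ids(gid, groups))  # prints both hashed chat_ids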
@@ -186,6 +186,7 @@ class MessageRecvS4U(MessageRecv):
     def __init__(self, message_dict: dict[str, Any]):
         super().__init__(message_dict)
         self.is_gift = False
+        self.is_fake_gift = False
         self.is_superchat = False
         self.gift_info = None
         self.gift_name = None
@@ -194,6 +195,7 @@ class MessageRecvS4U(MessageRecv):
         self.superchat_price = None
         self.superchat_message_text = None
         self.is_screen = False
+        self.voice_done = None
 
     async def process(self) -> None:
         self.processed_plain_text = await self._process_message_segments(self.message_segment)
@@ -257,6 +259,11 @@ class MessageRecvS4U(MessageRecv):
             self.gift_name = name.strip()
             self.gift_count = int(count.strip())
             return ""
+        elif segment.type == "voice_done":
+            msg_id = segment.data
+            logger.info(f"voice_done: {msg_id}")
+            self.voice_done = msg_id
+            return ""
         elif segment.type == "superchat":
             self.is_superchat = True
             self.superchat_info = segment.data
@@ -34,5 +34,7 @@ max_typing_delay = 2.0 # 最大打字延迟(秒)
 enable_old_message_cleanup = true # 是否自动清理过旧的普通消息
 enable_loading_indicator = true # 是否显示加载提示
 
+enable_streaming_output = true # 是否启用流式输出,false时全部生成后一次性发送
+
 max_context_message_length = 20
 max_core_message_length = 30
@@ -34,5 +34,7 @@ max_typing_delay = 2.0 # 最大打字延迟(秒)
 enable_old_message_cleanup = true # 是否自动清理过旧的普通消息
 enable_loading_indicator = true # 是否显示加载提示
 
+enable_streaming_output = true # 是否启用流式输出,false时全部生成后一次性发送
+
 max_context_message_length = 20
 max_core_message_length = 30
@@ -23,7 +23,7 @@ class GiftManager:
     def __init__(self):
         """初始化礼物管理器"""
         self.pending_gifts: Dict[Tuple[str, str], PendingGift] = {}
-        self.debounce_timeout = 3.0  # 3秒防抖时间
+        self.debounce_timeout = 5.0  # 5秒防抖时间
 
     async def handle_gift(self, message: MessageRecvS4U, callback: Optional[Callable[[MessageRecvS4U], None]] = None) -> bool:
         """处理礼物消息,返回是否应该立即处理
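The only functional change in this hunk is the debounce window (3 s to 5 s), so gifts from the same user are merged for longer before a single reply is produced. For context, a minimal sketch of the asyncio debounce pattern such a manager typically implements; the class and field names are simplified assumptions, not the actual GiftManager:

    import asyncio
    from typing import Callable, Dict, Tuple

    class GiftDebouncer:
        """Collapse a burst of identical gifts into one callback after a quiet period."""

        def __init__(self, timeout: float = 5.0):
            self.timeout = timeout
            self.counts: Dict[Tuple[str, str], int] = {}
            self.timers: Dict[Tuple[str, str], asyncio.Task] = {}

        async def handle(self, user_id: str, gift_name: str,
                         flush: Callable[[str, str, int], None]) -> None:
            key = (user_id, gift_name)
            self.counts[key] = self.counts.get(key, 0) + 1
            # restart the quiet-period timer on every new gift from the same user
            if key in self.timers:
                self.timers[key].cancel()
            self.timers[key] = asyncio.create_task(self._fire_later(key, flush))

        async def _fire_later(self, key, flush):
            try:
                await asyncio.sleep(self.timeout)
            except asyncio.CancelledError:
                return
            count = self.counts.pop(key, 0)
            self.timers.pop(key, None)
            flush(key[0], key[1], count)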
@@ -34,6 +34,10 @@ class MessageSenderContainer:
         self._paused_event = asyncio.Event()
         self._paused_event.set()  # 默认设置为非暂停状态
 
+        self.msg_id = ""
+
+        self.voice_done = ""
+
     async def add_message(self, chunk: str):
         """向队列中添加一个消息块。"""
@@ -84,14 +88,9 @@ class MessageSenderContainer:
                 delay = s4u_config.typing_delay
                 await asyncio.sleep(delay)
 
-                current_time = time.time()
-                msg_id = f"{current_time}_{random.randint(1000, 9999)}"
-
-                text_to_send = chunk
-
-                message_segment = Seg(type="text", data=text_to_send)
+                message_segment = Seg(type="tts_text", data=f"{self.msg_id}:{chunk}")
                 bot_message = MessageSending(
-                    message_id=msg_id,
+                    message_id=self.msg_id,
                     chat_stream=self.chat_stream,
                     bot_user_info=UserInfo(
                         user_id=global_config.bot.qq_account,
@@ -109,8 +108,26 @@ class MessageSenderContainer:
                 await bot_message.process()
 
                 await get_global_api().send_message(bot_message)
-                logger.info(f"已将消息 '{text_to_send}' 发往平台 '{bot_message.message_info.platform}'")
+                logger.info(f"已将消息 '{self.msg_id}:{chunk}' 发往平台 '{bot_message.message_info.platform}'")
+
+                message_segment = Seg(type="text", data=chunk)
+                bot_message = MessageSending(
+                    message_id=self.msg_id,
+                    chat_stream=self.chat_stream,
+                    bot_user_info=UserInfo(
+                        user_id=global_config.bot.qq_account,
+                        user_nickname=global_config.bot.nickname,
+                        platform=self.original_message.message_info.platform,
+                    ),
+                    sender_info=self.original_message.message_info.user_info,
+                    message_segment=message_segment,
+                    reply=self.original_message,
+                    is_emoji=False,
+                    apply_set_reply_logic=True,
+                    reply_to=f"{self.original_message.message_info.user_info.platform}:{self.original_message.message_info.user_info.user_id}",
+                )
+                await bot_message.process()
 
                 await self.storage.store_message(bot_message, self.chat_stream)
 
             except Exception as e:
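With this hunk each chunk is sent twice: first as a tts_text segment whose payload is "{msg_id}:{chunk}" for the TTS/avatar side, then as a plain text segment for the normal chat pipeline. Since the chunk itself may contain colons, a consumer should split only on the first one; a hypothetical adapter-side helper:

    def split_tts_payload(data: str) -> tuple[str, str]:
        # payload format produced above: "<msg_id>:<chunk>"; the msg_id never contains ":"
        msg_id, _, text = data.partition(":")
        return msg_id, text

    msg_id, text = split_tts_payload("1720000000.123_4567:你好:欢迎来到直播间")
    assert msg_id == "1720000000.123_4567"
    assert text == "你好:欢迎来到直播间"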
@@ -175,6 +192,10 @@ class S4UChat:
         self.gpt = S4UStreamGenerator()
         self.interest_dict: Dict[str, float] = {}  # 用户兴趣分
 
+
+        self.msg_id = ""
+        self.voice_done = ""
+
         logger.info(f"[{self.stream_name}] S4UChat with two-queue system initialized.")
 
     def _get_priority_info(self, message: MessageRecv) -> dict:
@@ -197,8 +218,11 @@ class S4UChat:
     def _get_interest_score(self, user_id: str) -> float:
         """获取用户的兴趣分,默认为1.0"""
         return self.interest_dict.get(user_id, 1.0)
 
+    def go_processing(self):
+        if self.voice_done == self.msg_id:
+            return True
+        return False
+
     def _calculate_base_priority_score(self, message: MessageRecv, priority_info: dict) -> float:
         """
@@ -413,45 +437,59 @@ class S4UChat:
             await asyncio.sleep(random_delay)
         chat_watching = watching_manager.get_watching_by_chat_id(self.stream_id)
         await chat_watching.on_message_received()
 
+    def get_processing_message_id(self):
+        self.msg_id = f"{time.time()}_{random.randint(1000, 9999)}"
+
     async def _generate_and_send(self, message: MessageRecv):
         """为单个消息生成文本回复。整个过程可以被中断。"""
         self._is_replying = True
         total_chars_sent = 0  # 跟踪发送的总字符数
 
+        self.get_processing_message_id()
+
         if s4u_config.enable_loading_indicator:
-            await send_loading(self.stream_id, "......")
+            await send_loading(self.stream_id, ".........")
 
         # 视线管理:开始生成回复时切换视线状态
         chat_watching = watching_manager.get_watching_by_chat_id(self.stream_id)
         asyncio.create_task(self.delay_change_watching_state())
 
         sender_container = MessageSenderContainer(self.chat_stream, message)
         sender_container.start()
 
         try:
             logger.info(f"[S4U] 开始为消息生成文本和音频流: '{message.processed_plain_text[:30]}...'")
 
-            # 1. 逐句生成文本、发送
-            gen = self.gpt.generate_response(message, "")
-            async for chunk in gen:
-                # 如果任务被取消,await 会在此处引发 CancelledError
-                # a. 发送文本块
-                await sender_container.add_message(chunk)
-                total_chars_sent += len(chunk)  # 累计字符数
+            if s4u_config.enable_streaming_output:
+                # 流式输出,边生成边发送
+                gen = self.gpt.generate_response(message, "")
+                async for chunk in gen:
+                    sender_container.msg_id = self.msg_id
+                    await sender_container.add_message(chunk)
+                    total_chars_sent += len(chunk)
+            else:
+                # 一次性输出,先收集所有chunk
+                all_chunks = []
+                gen = self.gpt.generate_response(message, "")
+                async for chunk in gen:
+                    all_chunks.append(chunk)
+                    total_chars_sent += len(chunk)
+                # 一次性发送
+                sender_container.msg_id = self.msg_id
+                await sender_container.add_message("".join(all_chunks))
 
             # 等待所有文本消息发送完成
             await sender_container.close()
             await sender_container.join()
 
-            # 回复完成后延迟,每个字延迟0.4秒
-            if total_chars_sent > 0:
-                delay_time = total_chars_sent * 0.4
-                logger.info(f"[{self.stream_name}] 回复完成,共发送 {total_chars_sent} 个字符,等待 {delay_time:.1f} 秒后继续处理下一个消息。")
-                await asyncio.sleep(delay_time)
+            start_time = time.time()
+            while not self.go_processing():
+                if time.time() - start_time > 60:
+                    logger.warning(f"[{self.stream_name}] 等待消息发送超时(60秒),强制跳出循环。")
+                    break
+                logger.info(f"[{self.stream_name}] 等待消息发送完成...")
+                await asyncio.sleep(0.3)
 
             logger.info(f"[{self.stream_name}] 所有文本块处理完毕。")
 
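Taken together, the msg_id / voice_done additions form a completion handshake: _generate_and_send stamps a fresh msg_id before replying, the sender container tags every tts_text chunk with it, the adapter is expected to report the same id back as a voice_done segment once playback finishes, and the loop above polls go_processing() until the ids match or 60 s elapse. A condensed, self-contained sketch of that flow with simplified names (not the actual classes):

    import asyncio
    import random
    import time

    class ReplySession:
        def __init__(self) -> None:
            self.msg_id = ""
            self.voice_done = ""  # set externally when the adapter reports playback done

        def new_msg_id(self) -> str:
            self.msg_id = f"{time.time()}_{random.randint(1000, 9999)}"
            return self.msg_id

        def go_processing(self) -> bool:
            return self.voice_done == self.msg_id

        async def wait_until_done(self, timeout: float = 60.0, poll: float = 0.3) -> bool:
            start = time.time()
            while not self.go_processing():
                if time.time() - start > timeout:
                    return False  # timed out; the caller moves on anyway
                await asyncio.sleep(poll)
            return True

    async def demo() -> None:
        session = ReplySession()
        mid = session.new_msg_id()
        # simulate the adapter finishing TTS one second later
        asyncio.get_running_loop().call_later(1.0, lambda: setattr(session, "voice_done", mid))
        print(await session.wait_until_done(timeout=5.0))  # True

    asyncio.run(demo())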
@@ -93,6 +93,9 @@ class S4UMessageProcessor:
             group_info=groupinfo,
         )
 
+        if await self.hadle_if_voice_done(message):
+            return
+
         # 处理礼物消息,如果消息被暂存则停止当前处理流程
         if not skip_gift_debounce and not await self.handle_if_gift(message):
             return
@@ -107,6 +110,7 @@ class S4UMessageProcessor:
 
         s4u_chat = get_s4u_chat_manager().get_or_create_chat(chat)
 
+
         await s4u_chat.add_message(message)
 
         _interested_rate, _ = await _calculate_interest(message)
@@ -139,14 +143,21 @@ class S4UMessageProcessor:
             return True
         return False
 
+    async def hadle_if_voice_done(self, message: MessageRecvS4U):
+        if message.voice_done:
+            s4u_chat = get_s4u_chat_manager().get_or_create_chat(message.chat_stream)
+            s4u_chat.voice_done = message.voice_done
+            return True
+        return False
+
     async def check_if_fake_gift(self, message: MessageRecvS4U) -> bool:
         """检查消息是否为假礼物"""
         if message.is_gift:
             return False
 
-        gift_keywords = ["送出了礼物", "礼物", "送出了"]
+        gift_keywords = ["送出了礼物", "礼物", "送出了", "投喂"]
         if any(keyword in message.processed_plain_text for keyword in gift_keywords):
-            message.processed_plain_text += "(注意:这是一条普通弹幕信息,对方没有真的发送礼物,不是礼物信息,注意区分)"
+            message.is_fake_gift = True
             return True
 
         return False
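check_if_fake_gift no longer rewrites the danmaku text; it only raises the is_fake_gift flag, and build_gift_info (changed later in this commit) turns that flag into the prompt-side warning. A reduced sketch of the two halves as standalone functions (hypothetical, outside the real classes):

    GIFT_KEYWORDS = ["送出了礼物", "礼物", "送出了", "投喂"]

    def looks_like_fake_gift(text: str, is_real_gift: bool) -> bool:
        # a plain danmaku that merely mentions gift words gets flagged; real gifts never do
        return (not is_real_gift) and any(k in text for k in GIFT_KEYWORDS)

    def gift_info_block(text: str, is_gift: bool, is_fake_gift: bool) -> str:
        if is_gift:
            return "这是一条礼物信息,请注意这位用户"
        if is_fake_gift:
            return f"{text}(注意:这是一条普通弹幕信息,对方没有真的发送礼物)"
        return ""

    danmaku = "我送出了礼物 x999"
    flagged = looks_like_fake_gift(danmaku, is_real_gift=False)
    print(gift_info_block(danmaku, is_gift=False, is_fake_gift=flagged))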
@@ -16,6 +16,7 @@ from src.person_info.relationship_manager import get_relationship_manager
 from src.chat.message_receive.chat_stream import ChatStream
 from src.mais4u.mais4u_chat.super_chat_manager import get_super_chat_manager
 from src.mais4u.mais4u_chat.screen_manager import screen_manager
+from src.chat.express.expression_selector import expression_selector
 
 logger = get_logger("prompt")
 
@@ -36,6 +37,7 @@ def init_prompt():
 
 {relation_info_block}
 {memory_block}
+{expression_habits_block}
 
 你现在的主要任务是和 {sender_name} 发送的弹幕聊天。同时,也有其他用户会参与你们的聊天,你可以参考他们的回复内容,但是你主要还是关注你和{sender_name}的聊天内容。
 
|
|||||||
对方最新发送的内容:{message_txt}
|
对方最新发送的内容:{message_txt}
|
||||||
{gift_info}
|
{gift_info}
|
||||||
回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。
|
回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。
|
||||||
|
表现的有个性,不要随意服从他人要求,积极互动。
|
||||||
不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容,现在{sender_name}正在等待你的回复。
|
不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出回复内容,现在{sender_name}正在等待你的回复。
|
||||||
你的回复风格不要浮夸,有逻辑和条理,请你继续回复{sender_name}。
|
你的回复风格不要浮夸,有逻辑和条理,请你继续回复{sender_name}。
|
||||||
你的发言:
|
你的发言:
|
||||||
@@ -63,32 +66,42 @@ class PromptBuilder:
         self.prompt_built = ""
         self.activate_messages = ""
 
-    async def build_identity_block(self) -> str:
-        person_info_manager = get_person_info_manager()
-        bot_person_id = person_info_manager.get_person_id("system", "bot_id")
-        bot_name = global_config.bot.nickname
-        if global_config.bot.alias_names:
-            bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
-        else:
-            bot_nickname = ""
-        short_impression = await person_info_manager.get_value(bot_person_id, "short_impression")
-        try:
-            if isinstance(short_impression, str) and short_impression.strip():
-                short_impression = ast.literal_eval(short_impression)
-            elif not short_impression:
-                logger.warning("short_impression为空,使用默认值")
-                short_impression = ["友好活泼", "人类"]
-        except (ValueError, SyntaxError) as e:
-            logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}")
-            short_impression = ["友好活泼", "人类"]
-
-        if not isinstance(short_impression, list) or len(short_impression) < 2:
-            logger.warning(f"short_impression格式不正确: {short_impression}, 使用默认值")
-            short_impression = ["友好活泼", "人类"]
-        personality = short_impression[0]
-        identity = short_impression[1]
-        prompt_personality = personality + "," + identity
-        return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
+    async def build_expression_habits(self, chat_stream: ChatStream, chat_history, target):
+        style_habits = []
+        grammar_habits = []
+
+        # 使用从处理器传来的选中表达方式
+        # LLM模式:调用LLM选择5-10个,然后随机选5个
+        selected_expressions = await expression_selector.select_suitable_expressions_llm(
+            chat_stream.stream_id, chat_history, max_num=12, min_num=5, target_message=target
+        )
+
+        if selected_expressions:
+            logger.debug(f" 使用处理器选中的{len(selected_expressions)}个表达方式")
+            for expr in selected_expressions:
+                if isinstance(expr, dict) and "situation" in expr and "style" in expr:
+                    expr_type = expr.get("type", "style")
+                    if expr_type == "grammar":
+                        grammar_habits.append(f"当{expr['situation']}时,使用 {expr['style']}")
+                    else:
+                        style_habits.append(f"当{expr['situation']}时,使用 {expr['style']}")
+        else:
+            logger.debug("没有从处理器获得表达方式,将使用空的表达方式")
+            # 不再在replyer中进行随机选择,全部交给处理器处理
+
+        style_habits_str = "\n".join(style_habits)
+        grammar_habits_str = "\n".join(grammar_habits)
+
+        # 动态构建expression habits块
+        expression_habits_block = ""
+        if style_habits_str.strip():
+            expression_habits_block += f"你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:\n{style_habits_str}\n\n"
+        if grammar_habits_str.strip():
+            expression_habits_block += f"请你根据情景使用以下句法:\n{grammar_habits_str}\n"
+
+        return expression_habits_block
 
     async def build_relation_info(self, chat_stream) -> str:
         is_group_chat = bool(chat_stream.group_info)
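The new build_expression_habits boils down to partitioning the selector's output by type and rendering the two lists into one prompt block. A compact sketch with stubbed selector output; the dict shape ("situation"/"style"/"type") matches what get_random_expressions builds in the first hunk, while the example expressions themselves are made up:

    selected = [
        {"situation": "对方开玩笑", "style": "顺着梗接一句", "type": "style"},
        {"situation": "解释原因", "style": "先结论后原因的短句", "type": "grammar"},
    ]

    style_habits, grammar_habits = [], []
    for expr in selected:
        line = f"当{expr['situation']}时,使用 {expr['style']}"
        (grammar_habits if expr.get("type") == "grammar" else style_habits).append(line)

    block = ""
    if style_habits:
        block += "你可以参考以下的语言习惯,如果情景合适就使用:\n" + "\n".join(style_habits) + "\n\n"
    if grammar_habits:
        block += "请你根据情景使用以下句法:\n" + "\n".join(grammar_habits) + "\n"
    print(block)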
@@ -149,8 +162,10 @@ class PromptBuilder:
             if msg_user_id == bot_id:
                 if msg_dict.get("reply_to") and talk_type == msg_dict.get("reply_to"):
                     core_dialogue_list.append(msg_dict)
-                else:
+                elif msg_dict.get("reply_to") and talk_type != msg_dict.get("reply_to"):
                     background_dialogue_list.append(msg_dict)
+                # else:
+                #     background_dialogue_list.append(msg_dict)
             elif msg_user_id == target_user_id:
                 core_dialogue_list.append(msg_dict)
             else:
@@ -210,6 +225,10 @@ class PromptBuilder:
     def build_gift_info(self, message: MessageRecvS4U):
         if message.is_gift:
             return f"这是一条礼物信息,{message.gift_name} x{message.gift_count},请注意这位用户"
+        else:
+            if message.is_fake_gift:
+                return f"{message.processed_plain_text}(注意:这是一条普通弹幕信息,对方没有真的发送礼物,不是礼物信息,注意区分,如果对方在发假的礼物骗你,请反击)"
+
         return ""
 
     def build_sc_info(self, message: MessageRecvS4U):
@@ -223,8 +242,8 @@ class PromptBuilder:
         message_txt: str,
         sender_name: str = "某人",
     ) -> str:
-        identity_block, relation_info_block, memory_block = await asyncio.gather(
-            self.build_identity_block(), self.build_relation_info(chat_stream), self.build_memory_block(message_txt)
+        relation_info_block, memory_block, expression_habits_block = await asyncio.gather(
+            self.build_relation_info(chat_stream), self.build_memory_block(message_txt), self.build_expression_habits(chat_stream, message_txt, sender_name)
         )
 
         core_dialogue_prompt, background_dialogue_prompt = self.build_chat_history_prompts(chat_stream, message)
@@ -241,8 +260,8 @@ class PromptBuilder:
 
         prompt = await global_prompt_manager.format_prompt(
             template_name,
-            identity_block=identity_block,
             time_block=time_block,
+            expression_habits_block=expression_habits_block,
             relation_info_block=relation_info_block,
             memory_block=memory_block,
             screen_info=screen_info,
@@ -62,7 +62,10 @@ class S4UStreamGenerator:
         person_name = await person_info_manager.get_value(person_id, "person_name")
 
         if message.chat_stream.user_info.user_nickname:
-            sender_name = f"[{message.chat_stream.user_info.user_nickname}](你叫ta{person_name})"
+            if person_name:
+                sender_name = f"[{message.chat_stream.user_info.user_nickname}](你叫ta{person_name})"
+            else:
+                sender_name = f"[{message.chat_stream.user_info.user_nickname}]"
         else:
             sender_name = f"用户({message.chat_stream.user_info.user_id})"
 
@@ -167,6 +167,9 @@ class S4UConfig(S4UConfigBase):
     enable_loading_indicator: bool = True
     """是否显示加载提示"""
 
+    enable_streaming_output: bool = True
+    """是否启用流式输出,false时全部生成后一次性发送"""
+
     max_context_message_length: int = 20
     """上下文消息最大长度"""
 
@@ -370,7 +370,14 @@ async def custom_to_stream(
         bool: 是否发送成功
     """
     return await _send_to_target(
-        message_type, content, stream_id, display_message, typing, reply_to, storage_message, show_log
+        message_type=message_type,
+        content=content,
+        stream_id=stream_id,
+        display_message=display_message,
+        typing=typing,
+        reply_to=reply_to,
+        storage_message=storage_message,
+        show_log=show_log,
     )