fix: adjust directory structure, optimize hfc prompt, remove schedule, remove the dynamic and LLM-judged willing modes
src/chat/focus_chat/expressors/default_expressor.py (new file, 333 lines)
@@ -0,0 +1,333 @@
import time
import traceback
from typing import List, Optional, Dict, Any
from src.chat.message_receive.message import MessageRecv, MessageThinking, MessageSending
from src.chat.message_receive.message import Seg  # Local import needed after move
from src.chat.message_receive.message import UserInfo
from src.chat.message_receive.chat_stream import chat_manager
from src.common.logger_manager import get_logger
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.utils_image import image_path_to_base64  # Local import needed after move
from src.chat.utils.timer_calculator import Timer  # <--- Import Timer
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartflow_prompt_builder import prompt_builder
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
from src.manager.mood_manager import mood_manager
from src.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream

logger = get_logger("expressor")

class DefaultExpressor:
    def __init__(self, chat_id: str):
        self.log_prefix = "expressor"
        self.express_model = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=256,
            request_type="response_heartflow",
        )
        self.heart_fc_sender = HeartFCSender()

        self.chat_id = chat_id
        self.chat_stream: Optional[ChatStream] = None
        self.is_group_chat = True
        self.chat_target_info = None

    async def initialize(self):
        self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)

    async def _create_thinking_message(self, anchor_message: Optional[MessageRecv]) -> Optional[str]:
        """Create a thinking message (anchored to anchor_message if possible)."""
        if not anchor_message or not anchor_message.chat_stream:
            logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。")
            return None

        chat = anchor_message.chat_stream
        messageinfo = anchor_message.message_info
        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            platform=messageinfo.platform,
        )
        # logger.debug(f"创建思考消息:{anchor_message}")
        # logger.debug(f"创建思考消息chat:{chat}")
        # logger.debug(f"创建思考消息bot_user_info:{bot_user_info}")
        # logger.debug(f"创建思考消息messageinfo:{messageinfo}")

        thinking_time_point = round(time.time(), 2)
        thinking_id = "mt" + str(thinking_time_point)
        thinking_message = MessageThinking(
            message_id=thinking_id,
            chat_stream=chat,
            bot_user_info=bot_user_info,
            reply=anchor_message,  # reply to the anchor message
            thinking_start_time=thinking_time_point,
        )
        logger.debug(f"创建思考消息thinking_message:{thinking_message}")
        # Access MessageManager directly (using heart_fc_sender)
        await self.heart_fc_sender.register_thinking(thinking_message)
        return thinking_id

    async def deal_reply(
        self,
        cycle_timers: dict,
        action_data: Dict[str, Any],
        reasoning: str,
        anchor_message: MessageRecv,
    ) -> tuple[bool, Optional[List[str]]]:
        # Create the thinking message
        thinking_id = await self._create_thinking_message(anchor_message)
        if not thinking_id:
            raise Exception("无法创建思考消息")

        reply = None  # initialize reply so it is always defined
        try:
            has_sent_something = False

            # Handle the text part
            text_part = action_data.get("text", [])
            if text_part:
                with Timer("生成回复", cycle_timers):
                    # The original text-processing logic can be kept here or adjusted as needed
                    reply = await self.express(
                        in_mind_reply=text_part,
                        anchor_message=anchor_message,
                        thinking_id=thinking_id,
                        reason=reasoning,
                        action_data=action_data,
                    )

                if reply:
                    with Timer("发送文本消息", cycle_timers):
                        await self._send_response_messages(
                            anchor_message=anchor_message,
                            thinking_id=thinking_id,
                            response_set=reply,
                        )
                    has_sent_something = True
                else:
                    logger.warning(f"{self.log_prefix} 文本回复生成失败")

            # Handle the emoji part
            emoji_keyword = action_data.get("emojis", [])
            if emoji_keyword:
                await self._handle_emoji(anchor_message, [], emoji_keyword)
                has_sent_something = True

            if not has_sent_something:
                logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")

            return has_sent_something, reply

        except Exception as e:
            logger.error(f"回复失败: {e}")
            return False, None

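    # Illustrative shape of the action_data consumed by deal_reply()/express(); only the keys
    # "text", "emojis" and "target" are read here, and the example values are made up:
    # {
    #     "text": "想要表达的内容",      # forwarded to express() as in_mind_reply
    #     "emojis": "开心",             # keyword used to look up an emoji sticker
    #     "target": "被回复的那条消息",   # message text the reply is aimed at
    # }
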
    # --- Replier definition --- #

    async def express(
        self,
        in_mind_reply: str,
        reason: str,
        anchor_message: MessageRecv,
        thinking_id: str,
        action_data: Dict[str, Any],
    ) -> Optional[List[str]]:
        """
        Replier: core logic responsible for generating the reply text.
        (The functionality of the original HeartFCGenerator has been merged in.)
        """
        try:
            # 1. Get the mood arousal factor and adjust the model temperature
            arousal_multiplier = mood_manager.get_arousal_multiplier()
            current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier
            self.express_model.params["temperature"] = current_temp  # dynamically adjust the temperature

            # 2. Get the info catcher
            info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

            # --- Determine sender_name for private chat ---
            sender_name_for_prompt = "某人"  # Default for group or if info unavailable
            if not self.is_group_chat and self.chat_target_info:
                # Prioritize person_name, then nickname
                sender_name_for_prompt = (
                    self.chat_target_info.get("person_name")
                    or self.chat_target_info.get("user_nickname")
                    or sender_name_for_prompt
                )
            # --- End determining sender_name ---

            target_message = action_data.get("target", "")

            # 3. Build the prompt
            with Timer("构建Prompt", {}):  # internal timer, optional
                prompt = await prompt_builder.build_prompt(
                    build_mode="focus",
                    chat_stream=self.chat_stream,  # Pass the stream object
                    in_mind_reply=in_mind_reply,
                    reason=reason,
                    current_mind_info="",
                    structured_info="",
                    sender_name=sender_name_for_prompt,  # Pass determined name
                    target_message=target_message,
                )

            # 4. Call the LLM to generate the reply
            content = None
            reasoning_content = None
            model_name = "unknown_model"
            if not prompt:
                logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
                return None

            try:
                with Timer("LLM生成", {}):  # internal timer, optional
                    # logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
                    content, reasoning_content, model_name = await self.express_model.generate_response(prompt)

                logger.info(f"{self.log_prefix}\nPrompt:\n{prompt}\n---------------------------\n")

                logger.info(f"想要表达:{in_mind_reply}")
                logger.info(f"理由:{reason}")
                logger.info(f"生成回复: {content}\n")
                info_catcher.catch_after_llm_generated(
                    prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
                )

            except Exception as llm_e:
                # Keep the error message short
                logger.error(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成失败: {llm_e}")
                return None  # no reply can be generated if the LLM call fails

            # 5. Process the LLM response
            if not content:
                logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] LLM 生成了空内容。")
                return None

            processed_response = process_llm_response(content)

            if not processed_response:
                logger.warning(f"{self.log_prefix}[Replier-{thinking_id}] 处理后的回复为空。")
                return None

            return processed_response

        except Exception as e:
            logger.error(f"{self.log_prefix}[Replier-{thinking_id}] 回复生成意外失败: {e}")
            traceback.print_exc()
            return None

    # --- Sender --- #

    async def _send_response_messages(
        self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str
    ) -> Optional[MessageSending]:
        """Send the reply messages (anchored to anchor_message if possible) via HeartFCSender."""
        chat = self.chat_stream
        if chat is None:
            logger.error(f"{self.log_prefix} 无法发送回复,chat_stream 为空。")
            return None
        if not anchor_message:
            logger.error(f"{self.log_prefix} 无法发送回复,anchor_message 为空。")
            return None

        chat_id = self.chat_id
        stream_name = chat_manager.get_stream_name(chat_id) or chat_id  # stream name for logging

        # Check whether the thinking process is still running and get its start time
        thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id)

        if thinking_start_time is None:
            logger.warning(f"[{stream_name}] {thinking_id} 思考过程未找到或已结束,无法发送回复。")
            return None

        mark_head = False
        first_bot_msg: Optional[MessageSending] = None
        reply_message_ids = []  # record the IDs of the messages actually sent
        bot_user_info = UserInfo(
            user_id=global_config.BOT_QQ,
            user_nickname=global_config.BOT_NICKNAME,
            platform=chat.platform,
        )

        for i, msg_text in enumerate(response_set):
            # Generate a unique ID for each message segment
            part_message_id = f"{thinking_id}_{i}"
            message_segment = Seg(type="text", data=msg_text)
            bot_message = MessageSending(
                message_id=part_message_id,  # unique ID of this segment
                chat_stream=chat,
                bot_user_info=bot_user_info,
                sender_info=anchor_message.message_info.user_info,
                message_segment=message_segment,
                reply=anchor_message,  # reply to the original anchor
                is_head=not mark_head,
                is_emoji=False,
                thinking_start_time=thinking_start_time,  # pass through the original thinking start time
            )
            try:
                if not mark_head:
                    mark_head = True
                    first_bot_msg = bot_message  # keep the first successfully sent message object
                    await self.heart_fc_sender.type_and_send_message(bot_message, typing=False)
                else:
                    await self.heart_fc_sender.type_and_send_message(bot_message, typing=True)

                reply_message_ids.append(part_message_id)  # record the ID we generated

            except Exception as e:
                logger.error(
                    f"{self.log_prefix}[Sender-{thinking_id}] 发送回复片段 {i} ({part_message_id}) 时失败: {e}"
                )
                # We could either continue with the next segment or abort here

        # After trying to send all segments, mark the original thinking_id as complete
        try:
            await self.heart_fc_sender.complete_thinking(chat_id, thinking_id)
        except Exception as e:
            logger.error(f"{self.log_prefix}[Sender-{thinking_id}] 完成思考状态 {thinking_id} 时出错: {e}")

        return first_bot_msg  # return the first successfully sent message object

    async def _handle_emoji(self, anchor_message: Optional[MessageRecv], response_set: List[str], send_emoji: str = ""):
        """Handle sending an emoji sticker (anchored to anchor_message if possible) via HeartFCSender."""
        if not anchor_message or not anchor_message.chat_stream:
            logger.error(f"{self.log_prefix} 无法处理表情包,缺少有效的锚点消息或聊天流。")
            return

        chat = anchor_message.chat_stream

        emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)

        if emoji_raw:
            emoji_path, description = emoji_raw

            emoji_cq = image_path_to_base64(emoji_path)
            thinking_time_point = round(time.time(), 2)  # used for the unique ID
            message_segment = Seg(type="emoji", data=emoji_cq)
            bot_user_info = UserInfo(
                user_id=global_config.BOT_QQ,
                user_nickname=global_config.BOT_NICKNAME,
                platform=anchor_message.message_info.platform,
            )
            bot_message = MessageSending(
                message_id="me" + str(thinking_time_point),  # unique ID of the emoji message
                chat_stream=chat,
                bot_user_info=bot_user_info,
                sender_info=anchor_message.message_info.user_info,
                message_segment=message_segment,
                reply=anchor_message,  # reply to the original anchor
                is_head=False,  # an emoji is usually not the head message
                is_emoji=True,
                # thinking_start_time is not needed here
            )

            try:
                await self.heart_fc_sender.send_and_store(bot_message)
            except Exception as e:
                logger.error(f"{self.log_prefix} 发送表情包 {bot_message.message_info.message_id} 时失败: {e}")
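A minimal usage sketch of the new DefaultExpressor (illustrative only: some_chat_id, anchor_message and the action_data values are placeholders supplied by the surrounding focus-chat cycle; note that chat_stream is only initialized to None in __init__, so the caller has to set it before express() / _send_response_messages() can use it):

# Hypothetical driver code; in the real pipeline these objects come from the focus-chat loop.
expressor = DefaultExpressor(chat_id=some_chat_id)
await expressor.initialize()                          # resolves group/private chat and target info
expressor.chat_stream = anchor_message.chat_stream    # must be set by the caller before replying

cycle_timers = {}
sent, reply_texts = await expressor.deal_reply(
    cycle_timers=cycle_timers,
    action_data={"text": "想表达的内容", "emojis": "开心", "target": "被回复的消息"},
    reasoning="决定回应这条消息",
    anchor_message=anchor_message,
)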
src/chat/focus_chat/expressors/exprssion_learner.py (new file, 320 lines)
@@ -0,0 +1,320 @@
import time
import random
from typing import List, Dict, Optional, Any, Tuple
from src.common.logger_manager import get_logger
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.chat_message_builder import get_raw_msg_by_timestamp_random, build_readable_messages
from src.chat.focus_chat.heartflow_prompt_builder import Prompt, global_prompt_manager
import os
import json


MAX_EXPRESSION_COUNT = 300

logger = get_logger("expressor")

def init_prompt() -> None:
    learn_style_prompt = """
{chat_str}

请从上面这段群聊中概括除了人名为"麦麦"之外的人的语言风格,只考虑文字,不要考虑表情包和图片
不要涉及具体的人名,只考虑语言风格
语言风格包含特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
当"xxx"时,可以"xxx", xxx不超过10个字

例如:
当"表示十分惊叹"时,使用"我嘞个xxxx"
当"表示讽刺的赞同,不想讲道理"时,使用"对对对"
当"想说明某个观点,但懒得明说",使用"懂的都懂"

注意不要总结你自己的发言
现在请你概括
"""
    Prompt(learn_style_prompt, "learn_style_prompt")

    personality_expression_prompt = """
{personality}

请从以上人设中总结出这个角色可能的语言风格
思考回复的特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
当"xxx"时,可以"xxx", xxx不超过10个字

例如:
当"表示十分惊叹"时,使用"我嘞个xxxx"
当"表示讽刺的赞同,不想讲道理"时,使用"对对对"
当"想说明某个观点,但懒得明说",使用"懂的都懂"

现在请你概括
"""
    Prompt(personality_expression_prompt, "personality_expression_prompt")

    learn_grammar_prompt = """
{chat_str}

请从上面这段群聊中概括除了人名为"麦麦"之外的人的语法和句法特点,只考虑纯文字,不要考虑表情包和图片
不要总结【图片】,【动画表情】,[图片],[动画表情],不总结 表情符号
不要涉及具体的人名,只考虑语法和句法特点,
语法和句法特点要包括,句子长短(具体字数),如何分局,有何种语病,如何拆分句子。
总结成如下格式的规律,总结的内容要简洁,不浮夸:
当"xxx"时,可以"xxx"

例如:
当"表达观点较复杂"时,使用"省略主语"的句法
当"不用详细说明的一般表达"时,使用"非常简洁的句子"的句法
当"需要单纯简单的确认"时,使用"单字或几个字的肯定"的句法

注意不要总结你自己的发言
现在请你概括
"""
    Prompt(learn_grammar_prompt, "learn_grammar_prompt")

class ExpressionLearner:
    def __init__(self) -> None:
        self.express_learn_model: LLMRequest = LLMRequest(
            model=global_config.llm_normal,
            temperature=0.1,
            max_tokens=256,
            request_type="response_heartflow",
        )

    async def get_expression_by_chat_id(
        self, chat_id: str
    ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
        """
        Read data/expression/learnt_style/{chat_id}/expressions.json,
        data/expression/learnt_grammar/{chat_id}/expressions.json and
        data/expression/personality/expressions.json.
        Returns (learnt_style_expressions, learnt_grammar_expressions, personality_expressions).
        """
        learnt_style_file = os.path.join("data", "expression", "learnt_style", str(chat_id), "expressions.json")
        learnt_grammar_file = os.path.join("data", "expression", "learnt_grammar", str(chat_id), "expressions.json")
        personality_file = os.path.join("data", "expression", "personality", "expressions.json")
        learnt_style_expressions = []
        learnt_grammar_expressions = []
        personality_expressions = []
        if os.path.exists(learnt_style_file):
            with open(learnt_style_file, "r", encoding="utf-8") as f:
                learnt_style_expressions = json.load(f)
        if os.path.exists(learnt_grammar_file):
            with open(learnt_grammar_file, "r", encoding="utf-8") as f:
                learnt_grammar_expressions = json.load(f)
        if os.path.exists(personality_file):
            with open(personality_file, "r", encoding="utf-8") as f:
                personality_expressions = json.load(f)
        return learnt_style_expressions, learnt_grammar_expressions, personality_expressions

    def is_similar(self, s1: str, s2: str) -> bool:
        """
        Whether two strings are similar: both must be at least 5 characters long and more than
        80% of their position-aligned characters must match (substring containment is not considered).
        """
        if not s1 or not s2:
            return False
        min_len = min(len(s1), len(s2))
        if min_len < 5:
            return False
        same = sum(1 for a, b in zip(s1, s2) if a == b)
        return same / min_len > 0.8
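        # Worked example: is_similar("对对对对对对", "对对对对对哒") aligns 6 character pairs,
        # 5 of which match, so 5 / 6 ≈ 0.83 > 0.8 -> True; strings shorter than 5 characters
        # are never considered similar.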

    async def learn_and_store_expression(self) -> Tuple[List[Tuple[str, str, str]], List[Tuple[str, str, str]]]:
        """
        Learn and store expressions: language style and grammar habits are learnt separately.
        Returns (learnt_style, learnt_grammar).
        """
        learnt_style: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="style", num=3)
        if not learnt_style:
            return [], []

        learnt_grammar: Optional[List[Tuple[str, str, str]]] = await self.learn_and_store(type="grammar", num=2)
        if not learnt_grammar:
            return [], []

        return learnt_style, learnt_grammar

    async def learn_and_store(self, type: str, num: int = 10) -> List[Tuple[str, str, str]]:
        """
        Pick num random recent messages, learn expressions of the given type from them,
        and merge the result into the per-chat expressions.json files.
        type: "style" or "grammar"
        """
        if type == "style":
            type_str = "语言风格"
        elif type == "grammar":
            type_str = "句法特点"
        else:
            raise ValueError(f"Invalid type: {type}")
        logger.info(f"开始学习{type_str}...")
        learnt_expressions: Optional[List[Tuple[str, str, str]]] = await self.learn_expression(type, num)
        logger.info(f"学习到{len(learnt_expressions) if learnt_expressions else 0}条{type_str}")
        # learnt_expressions: List[(chat_id, situation, style)]

        if not learnt_expressions:
            logger.info(f"没有学习到{type_str}")
            return []

        # Group by chat_id
        chat_dict: Dict[str, List[Dict[str, str]]] = {}
        for chat_id, situation, style in learnt_expressions:
            if chat_id not in chat_dict:
                chat_dict[chat_id] = []
            chat_dict[chat_id].append({"situation": situation, "style": style})
        # Store to data/expression/learnt_{type}/{chat_id}/expressions.json
        for chat_id, expr_list in chat_dict.items():
            dir_path = os.path.join("data", "expression", f"learnt_{type}", str(chat_id))
            os.makedirs(dir_path, exist_ok=True)
            file_path = os.path.join(dir_path, "expressions.json")
            # If the file already exists, read it first and merge
            if os.path.exists(file_path):
                old_data: List[Dict[str, Any]] = []
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        old_data = json.load(f)
                except Exception:
                    old_data = []
            else:
                old_data = []
            # When over the maximum count, items with count == 1 have a 20% chance of being dropped
            if len(old_data) >= MAX_EXPRESSION_COUNT:
                new_old_data = []
                for item in old_data:
                    if item.get("count", 1) == 1 and random.random() < 0.2:
                        continue  # 20% chance to drop
                    new_old_data.append(item)
                old_data = new_old_data
            # Merge logic
            for new_expr in expr_list:
                found = False
                for old_expr in old_data:
                    if self.is_similar(new_expr["situation"], old_expr.get("situation", "")) and self.is_similar(
                        new_expr["style"], old_expr.get("style", "")
                    ):
                        found = True
                        # 50% chance to replace the stored wording
                        if random.random() < 0.5:
                            old_expr["situation"] = new_expr["situation"]
                            old_expr["style"] = new_expr["style"]
                        old_expr["count"] = old_expr.get("count", 1) + 1
                        break
                if not found:
                    new_expr["count"] = 1
                    old_data.append(new_expr)
            with open(file_path, "w", encoding="utf-8") as f:
                json.dump(old_data, f, ensure_ascii=False, indent=2)
        return learnt_expressions

    async def learn_expression(self, type: str, num: int = 10) -> Optional[List[Tuple[str, str, str]]]:
        """Pick num random messages from the last 24 hours of chat history and learn expressions of the given type from them.

        Args:
            type: "style" or "grammar"
        """
        if type == "style":
            type_str = "语言风格"
            prompt_name = "learn_style_prompt"
        elif type == "grammar":
            type_str = "句法特点"
            prompt_name = "learn_grammar_prompt"
        else:
            raise ValueError(f"Invalid type: {type}")

        current_time = time.time()
        random_msg: Optional[List[Dict[str, Any]]] = get_raw_msg_by_timestamp_random(
            current_time - 3600 * 24, current_time, limit=num
        )
        if not random_msg:
            return None
        # Convert to a readable string
        chat_id: str = random_msg[0]["chat_id"]
        random_msg_str: str = await build_readable_messages(random_msg, timestamp_mode="normal")

        prompt: str = await global_prompt_manager.format_prompt(
            prompt_name,
            chat_str=random_msg_str,
        )

        logger.debug(f"学习{type_str}的prompt: {prompt}")

        try:
            response, _ = await self.express_learn_model.generate_response_async(prompt)
        except Exception as e:
            logger.error(f"学习{type_str}失败: {e}")
            return None

        logger.debug(f"学习{type_str}的response: {response}")

        expressions: List[Tuple[str, str, str]] = self.parse_expression_response(response, chat_id)

        return expressions

    def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]:
        """
        Parse the LLM's expression summary: for each line, extract the quoted text after 当 and after 使用
        and store it as a (chat_id, situation, style) tuple.
        """
        expressions: List[Tuple[str, str, str]] = []
        for line in response.splitlines():
            line = line.strip()
            if not line:
                continue
            # Find 当 and the quote that follows it
            idx_when = line.find('当"')
            if idx_when == -1:
                continue
            idx_quote1 = idx_when + 1
            idx_quote2 = line.find('"', idx_quote1 + 1)
            if idx_quote2 == -1:
                continue
            situation = line[idx_quote1 + 1 : idx_quote2]
            # Find 使用 and its quoted segment
            idx_use = line.find('使用"', idx_quote2)
            if idx_use == -1:
                continue
            idx_quote3 = idx_use + 2
            idx_quote4 = line.find('"', idx_quote3 + 1)
            if idx_quote4 == -1:
                continue
            style = line[idx_quote3 + 1 : idx_quote4]
            expressions.append((chat_id, situation, style))
        return expressions
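        # Worked example: the line 当"表示十分惊叹"时,使用"我嘞个xxxx" is parsed into
        # (chat_id, "表示十分惊叹", "我嘞个xxxx"); lines that do not contain both quoted
        # segments are skipped.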

    async def extract_and_store_personality_expressions(self):
        """
        Ensure that data/expression/personality exists, use the personality (expression_style) config
        as the prompt input, call the LLM to generate expression styles, parse the result with
        count=100 and store it to expressions.json.
        """
        dir_path = os.path.join("data", "expression", "personality")
        os.makedirs(dir_path, exist_ok=True)
        file_path = os.path.join(dir_path, "expressions.json")

        # Build the prompt
        prompt = await global_prompt_manager.format_prompt(
            "personality_expression_prompt",
            personality=global_config.expression_style,
        )
        logger.info(f"个性表达方式提取prompt: {prompt}")

        try:
            response, _ = await self.express_learn_model.generate_response_async(prompt)
        except Exception as e:
            logger.error(f"个性表达方式提取失败: {e}")
            return

        logger.info(f"个性表达方式提取response: {response}")
        # Use "personality" as the chat_id
        expressions = self.parse_expression_response(response, "personality")
        # Convert to dicts with count=100
        result = []
        for _, situation, style in expressions:
            result.append({"situation": situation, "style": style, "count": 100})
        # If there are more than 50 entries, randomly drop the excess and keep only 50
        if len(result) > 50:
            remove_count = len(result) - 50
            remove_indices = set(random.sample(range(len(result)), remove_count))
            result = [item for idx, item in enumerate(result) if idx not in remove_indices]
        with open(file_path, "w", encoding="utf-8") as f:
            json.dump(result, f, ensure_ascii=False, indent=2)
        logger.info(f"已写入{len(result)}条表达到{file_path}")

init_prompt()

expression_learner = ExpressionLearner()
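A sketch of how the learner could be wired in and what it persists. The file paths and entry shape come from the code above; the hourly asyncio loop is an assumption, not part of this commit:

import asyncio

async def expression_learning_loop():
    # Hypothetical periodic trigger; the real scheduling lives elsewhere in the bot.
    while True:
        await expression_learner.learn_and_store_expression()
        await asyncio.sleep(3600)

# Entries written to data/expression/learnt_style/<chat_id>/expressions.json (and the
# learnt_grammar / personality counterparts) look like:
# [{"situation": "表示十分惊叹", "style": "我嘞个xxxx", "count": 3}, ...]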