This commit is contained in:
tt-P607
2025-09-12 22:50:55 +08:00
14 changed files with 524 additions and 83 deletions

View File

@@ -30,50 +30,55 @@ DATA_PATH = os.path.join(ROOT_PATH, "data")
qa_manager = None
inspire_manager = None
# 检查LPMM知识库是否启用
if global_config.lpmm_knowledge.enable:
logger.info("正在初始化Mai-LPMM")
logger.info("创建LLM客户端")
# 初始化Embedding库
embed_manager = EmbeddingManager()
logger.info("正在从文件加载Embedding库")
try:
embed_manager.load_from_file()
except Exception as e:
logger.warning(f"此消息不会影响正常使用从文件加载Embedding库时{e}")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("Embedding库加载完成")
# 初始化KG
kg_manager = KGManager()
logger.info("正在从文件加载KG")
try:
kg_manager.load_from_file()
except Exception as e:
logger.warning(f"此消息不会影响正常使用从文件加载KG时{e}")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("KG加载完成")
def initialize_lpmm_knowledge():
"""初始化LPMM知识库"""
global qa_manager, inspire_manager
logger.info(f"KG节点数量{len(kg_manager.graph.get_node_list())}")
logger.info(f"KG边数量{len(kg_manager.graph.get_edge_list())}")
# 检查LPMM知识库是否启用
if global_config.lpmm_knowledge.enable:
logger.info("正在初始化Mai-LPMM")
logger.info("创建LLM客户端")
# 数据比对:Embedding库与KG的段落hash集合
for pg_hash in kg_manager.stored_paragraph_hashes:
key = f"paragraph-{pg_hash}"
if key not in embed_manager.stored_pg_hashes:
logger.warning(f"KG中存在Embedding库中不存在的段落{key}")
# 初始化Embedding库
embed_manager = EmbeddingManager()
logger.info("正在从文件加载Embedding库")
try:
embed_manager.load_from_file()
except Exception as e:
logger.warning(f"此消息不会影响正常使用从文件加载Embedding库时{e}")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("Embedding库加载完成")
# 初始化KG
kg_manager = KGManager()
logger.info("正在从文件加载KG")
try:
kg_manager.load_from_file()
except Exception as e:
logger.warning(f"此消息不会影响正常使用从文件加载KG时{e}")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("KG加载完成")
# 问答系统(用于知识库)
qa_manager = QAManager(
embed_manager,
kg_manager,
)
logger.info(f"KG节点数量{len(kg_manager.graph.get_node_list())}")
logger.info(f"KG边数量{len(kg_manager.graph.get_edge_list())}")
# # 记忆激活(用于记忆库)
# inspire_manager = MemoryActiveManager(
# embed_manager,
# llm_client_list[global_config["embedding"]["provider"]],
# )
else:
logger.info("LPMM知识库已禁用跳过初始化")
# 创建空的占位符对象,避免导入错误
# 数据比对Embedding库与KG的段落hash集合
for pg_hash in kg_manager.stored_paragraph_hashes:
key = f"paragraph-{pg_hash}"
if key not in embed_manager.stored_pg_hashes:
logger.warning(f"KG中存在Embedding库中不存在的段落{key}")
# 问答系统(用于知识库)
qa_manager = QAManager(
embed_manager,
kg_manager,
)
# # 记忆激活(用于记忆库)
# inspire_manager = MemoryActiveManager(
# embed_manager,
# llm_client_list[global_config["embedding"]["provider"]],
# )
else:
logger.info("LPMM知识库已禁用跳过初始化")
# 创建空的占位符对象,避免导入错误

View File

@@ -86,7 +86,6 @@ def init_prompt():
### 当前群聊中的所有人的聊天记录:
{background_dialogue_prompt}
### 其他群聊中的聊天记录
{cross_context_block}
### 当前群聊中正在与你对话的聊天记录
@@ -97,14 +96,10 @@ def init_prompt():
{reply_style}
{keywords_reaction_prompt}
- (如果有)你可以参考以下你在聊天中学到的表达方式:
{expression_habits_block}
## 工具信息
(如果有)你可以参考以下可能有帮助的工具返回的信息:
{tool_info_block}
## 知识库信息
(如果有)你可以参考以下可能有帮助的知识库中的信息:
{knowledge_prompt}
## 其他信息
@@ -114,8 +109,8 @@ def init_prompt():
{action_descriptions}
## 任务
### 梗概
- 你正在一个QQ群里聊天你需要理解整个群的聊天动态和话题走向并做出自然的回应。
*你正在一个QQ群里聊天你需要理解整个群的聊天动态和话题走向并做出自然的回应。*
### 核心任务
- 你现在的主要任务是和 {sender_name} 聊天。同时,也有其他用户会参与聊天,你可以参考他们的回复内容,但是你现在想回复{sender_name}的发言。

View File

@@ -550,7 +550,7 @@ class Prompt:
# 构建表达习惯块
if selected_expressions:
style_habits_str = "\n".join([f"- {expr}" for expr in selected_expressions])
expression_habits_block = f"你可以参考以下的语言习惯,当情景合适就使用,但不要生硬使用,以合理的方式结合到你的回复中:\n{style_habits_str}"
expression_habits_block = f"- 你可以参考以下的语言习惯,当情景合适就使用,但不要生硬使用,以合理的方式结合到你的回复中:\n{style_habits_str}"
else:
expression_habits_block = ""
@@ -652,7 +652,7 @@ class Prompt:
# 构建工具信息块
if tool_results:
tool_info_parts = ["以下是你通过工具获取到的实时信息:"]
tool_info_parts = ["## 工具信息","以下是你通过工具获取到的实时信息:"]
for tool_result in tool_results:
tool_name = tool_result.get("tool_name", "unknown")
content = tool_result.get("content", "")
@@ -697,7 +697,7 @@ class Prompt:
# 构建知识块
if knowledge_results and knowledge_results.get("knowledge_items"):
knowledge_parts = ["以下是与你当前对话相关的知识信息:"]
knowledge_parts = ["## 知识库信息","以下是与你当前对话相关的知识信息:"]
for item in knowledge_results["knowledge_items"]:
content = item.get("content", "")

View File

@@ -0,0 +1,53 @@
import copy
from typing import Any
class BaseDataModel:
    """Base class for all data models; provides a deep-copy helper."""

    def deepcopy(self):
        """Return a fully independent recursive copy of this instance."""
        return copy.deepcopy(self)


def temporarily_transform_class_to_dict(obj: Any) -> Any:
    # sourcery skip: assign-if-exp, reintroduce-else
    """Recursively convert BaseDataModel classes/instances inside *obj* into
    plain dicts without mutating the original, then flatten the result.

    - A class object (``isinstance(value, type)`` and a ``BaseDataModel``
      subclass) contributes its non-dunder, non-callable class attributes.
    - A ``BaseDataModel`` instance contributes ``vars(instance)``.
    - dict/list/tuple/set containers are walked recursively.
    - If the final result is a dict, nested dicts are merged upward into a
      single flat dict (duplicate keys: later values overwrite earlier ones).
    """

    def _convert(value: Any) -> Any:
        # Class object that subclasses BaseDataModel.
        if isinstance(value, type) and issubclass(value, BaseDataModel):
            return {
                name: _convert(attr)
                for name, attr in value.__dict__.items()
                if not name.startswith("__") and not callable(attr)
            }
        # Instance of BaseDataModel: use its attribute dict.
        if isinstance(value, BaseDataModel):
            return {name: _convert(attr) for name, attr in vars(value).items()}
        # Common containers: recurse element-wise.
        if isinstance(value, dict):
            return {key: _convert(item) for key, item in value.items()}
        if isinstance(value, list):
            return [_convert(item) for item in value]
        if isinstance(value, tuple):
            return tuple(_convert(item) for item in value)
        if isinstance(value, set):
            return {_convert(item) for item in value}
        # Plain value: return unchanged.
        return value

    def _flatten(mapping: dict) -> dict:
        flat: dict = {}
        for key, item in mapping.items():
            if isinstance(item, dict):
                # Hoist the nested dict's (flattened) entries to the top level.
                flat.update(_flatten(item))
            else:
                flat[key] = item
        return flat

    converted = _convert(obj)
    if isinstance(converted, dict):
        return _flatten(converted)
    return converted

View File

@@ -0,0 +1,235 @@
import json
from typing import Optional, Any, Dict
from dataclasses import dataclass, field
from . import BaseDataModel
@dataclass
class DatabaseUserInfo(BaseDataModel):
    """Identity of a user on a chat platform, as stored in the database."""

    platform: str = ""
    user_id: str = ""
    user_nickname: str = ""
    # Optional per-group display name ("card name"); None when not set.
    user_cardname: Optional[str] = None
@dataclass
class DatabaseGroupInfo(BaseDataModel):
    """Identity of a chat group, as stored in the database."""

    group_id: str = ""
    group_name: str = ""
    # Platform the group belongs to; None when unknown.
    group_platform: Optional[str] = None
@dataclass
class DatabaseChatInfo(BaseDataModel):
    """A chat stream (direct or group conversation), as stored in the database."""

    stream_id: str = ""
    platform: str = ""
    create_time: float = 0.0
    last_active_time: float = 0.0
    # Counterpart user of this stream; a fresh empty record by default.
    user_info: DatabaseUserInfo = field(default_factory=DatabaseUserInfo)
    # Group the stream belongs to; None for direct (non-group) chats.
    group_info: Optional[DatabaseGroupInfo] = None
@dataclass(init=False)
class DatabaseMessages(BaseDataModel):
    """A single chat message row, with its flat DB columns regrouped into
    nested ``user_info`` / ``group_info`` / ``chat_info`` sub-models.

    The custom ``__init__`` accepts the flat column names (``user_id``,
    ``chat_info_group_id``, ...) exactly as they appear in storage and
    assembles the structured sub-objects from them; ``flatten()`` performs
    the inverse mapping back to flat columns.
    """

    def __init__(
        self,
        message_id: str = "",
        time: float = 0.0,
        chat_id: str = "",
        reply_to: Optional[str] = None,
        interest_value: Optional[float] = None,
        key_words: Optional[str] = None,
        key_words_lite: Optional[str] = None,
        is_mentioned: Optional[bool] = None,
        is_at: Optional[bool] = None,
        reply_probability_boost: Optional[float] = None,
        processed_plain_text: Optional[str] = None,
        display_message: Optional[str] = None,
        priority_mode: Optional[str] = None,
        priority_info: Optional[str] = None,
        additional_config: Optional[str] = None,
        is_emoji: bool = False,
        is_picid: bool = False,
        is_command: bool = False,
        is_notify: bool = False,
        selected_expressions: Optional[str] = None,
        user_id: str = "",
        user_nickname: str = "",
        user_cardname: Optional[str] = None,
        user_platform: str = "",
        chat_info_group_id: Optional[str] = None,
        chat_info_group_name: Optional[str] = None,
        chat_info_group_platform: Optional[str] = None,
        chat_info_user_id: str = "",
        chat_info_user_nickname: str = "",
        chat_info_user_cardname: Optional[str] = None,
        chat_info_user_platform: str = "",
        chat_info_stream_id: str = "",
        chat_info_platform: str = "",
        chat_info_create_time: float = 0.0,
        chat_info_last_active_time: float = 0.0,
        **kwargs: Any,
    ):
        # Plain scalar columns are stored as-is.
        self.message_id = message_id
        self.time = time
        self.chat_id = chat_id
        self.reply_to = reply_to
        self.interest_value = interest_value
        self.key_words = key_words
        self.key_words_lite = key_words_lite
        self.is_mentioned = is_mentioned
        self.is_at = is_at
        self.reply_probability_boost = reply_probability_boost
        self.processed_plain_text = processed_plain_text
        self.display_message = display_message
        self.priority_mode = priority_mode
        self.priority_info = priority_info
        self.additional_config = additional_config
        self.is_emoji = is_emoji
        self.is_picid = is_picid
        self.is_command = is_command
        self.is_notify = is_notify
        self.selected_expressions = selected_expressions
        # Sender of this message.
        self.group_info: Optional[DatabaseGroupInfo] = None
        self.user_info = DatabaseUserInfo(
            user_id=user_id,
            user_nickname=user_nickname,
            user_cardname=user_cardname,
            platform=user_platform,
        )
        # group_info is populated only when BOTH group id and name are truthy;
        # otherwise it stays None (direct chat, or incomplete group columns).
        if chat_info_group_id and chat_info_group_name:
            self.group_info = DatabaseGroupInfo(
                group_id=chat_info_group_id,
                group_name=chat_info_group_name,
                group_platform=chat_info_group_platform,
            )
        # The chat stream this message belongs to, with its own user record
        # (the ``chat_info_user_*`` columns, distinct from the sender above).
        self.chat_info = DatabaseChatInfo(
            stream_id=chat_info_stream_id,
            platform=chat_info_platform,
            create_time=chat_info_create_time,
            last_active_time=chat_info_last_active_time,
            user_info=DatabaseUserInfo(
                user_id=chat_info_user_id,
                user_nickname=chat_info_user_nickname,
                user_cardname=chat_info_user_cardname,
                platform=chat_info_user_platform,
            ),
            group_info=self.group_info,
        )
        # Any extra columns are attached verbatim as instance attributes,
        # keeping the model tolerant of schema additions.
        if kwargs:
            for key, value in kwargs.items():
                setattr(self, key, value)

    def flatten(self) -> Dict[str, Any]:
        """Return this message as a flat column dict (inverse of ``__init__``),
        suitable for storage or transport. Group columns are None when the
        message has no ``group_info``.
        """
        return {
            "message_id": self.message_id,
            "time": self.time,
            "chat_id": self.chat_id,
            "reply_to": self.reply_to,
            "interest_value": self.interest_value,
            "key_words": self.key_words,
            "key_words_lite": self.key_words_lite,
            "is_mentioned": self.is_mentioned,
            "is_at": self.is_at,
            "reply_probability_boost": self.reply_probability_boost,
            "processed_plain_text": self.processed_plain_text,
            "display_message": self.display_message,
            "priority_mode": self.priority_mode,
            "priority_info": self.priority_info,
            "additional_config": self.additional_config,
            "is_emoji": self.is_emoji,
            "is_picid": self.is_picid,
            "is_command": self.is_command,
            "is_notify": self.is_notify,
            "selected_expressions": self.selected_expressions,
            "user_id": self.user_info.user_id,
            "user_nickname": self.user_info.user_nickname,
            "user_cardname": self.user_info.user_cardname,
            "user_platform": self.user_info.platform,
            "chat_info_group_id": self.group_info.group_id if self.group_info else None,
            "chat_info_group_name": self.group_info.group_name if self.group_info else None,
            "chat_info_group_platform": self.group_info.group_platform if self.group_info else None,
            "chat_info_stream_id": self.chat_info.stream_id,
            "chat_info_platform": self.chat_info.platform,
            "chat_info_create_time": self.chat_info.create_time,
            "chat_info_last_active_time": self.chat_info.last_active_time,
            "chat_info_user_platform": self.chat_info.user_info.platform,
            "chat_info_user_id": self.chat_info.user_info.user_id,
            "chat_info_user_nickname": self.chat_info.user_info.user_nickname,
            "chat_info_user_cardname": self.chat_info.user_info.user_cardname,
        }
@dataclass(init=False)
class DatabaseActionRecords(BaseDataModel):
    """A record of an action performed by the bot, tied to a chat stream.

    ``action_data`` arrives as a JSON string from storage and is kept as the
    decoded object on the instance. An already-decoded dict is also accepted
    (generalization: previously any non-str value raised).
    """

    def __init__(
        self,
        action_id: str,
        time: float,
        action_name: str,
        action_data: str,
        action_done: bool,
        action_build_into_prompt: bool,
        action_prompt_display: str,
        chat_id: str,
        chat_info_stream_id: str,
        chat_info_platform: str,
    ):
        self.action_id = action_id
        self.time = time
        self.action_name = action_name
        if isinstance(action_data, str):
            # json.JSONDecodeError is a ValueError subclass, so callers that
            # caught ValueError for bad input still catch malformed JSON.
            self.action_data = json.loads(action_data)
        elif isinstance(action_data, dict):
            # Already decoded upstream; store as-is.
            self.action_data = action_data
        else:
            raise ValueError("action_data must be a JSON string or a dict")
        self.action_done = action_done
        self.action_build_into_prompt = action_build_into_prompt
        self.action_prompt_display = action_prompt_display
        self.chat_id = chat_id
        self.chat_info_stream_id = chat_info_stream_id
        self.chat_info_platform = chat_info_platform

View File

@@ -0,0 +1,25 @@
from dataclasses import dataclass, field
from typing import Optional, Dict, TYPE_CHECKING
from . import BaseDataModel
if TYPE_CHECKING:
from .database_data_model import DatabaseMessages
from src.plugin_system.base.component_types import ActionInfo
@dataclass
class TargetPersonInfo(BaseDataModel):
    """The person a planned action or reply is directed at."""

    platform: str = ""
    user_id: str = ""
    user_nickname: str = ""
    # Internal person identifiers; None until resolved.
    person_id: Optional[str] = None
    person_name: Optional[str] = None
@dataclass
class ActionPlannerInfo(BaseDataModel):
    """One action decision produced by the planner, with its context."""

    action_type: str = ""
    # Planner's stated rationale for choosing this action; None if absent.
    reasoning: Optional[str] = None
    action_data: Optional[Dict] = None
    # Message that triggered the action, when there is one.
    action_message: Optional["DatabaseMessages"] = None
    # Actions that were available at decision time, keyed by name.
    available_actions: Optional[Dict[str, "ActionInfo"]] = None

View File

@@ -0,0 +1,16 @@
from dataclasses import dataclass
from typing import Optional, List, Tuple, TYPE_CHECKING, Any
from . import BaseDataModel
if TYPE_CHECKING:
from src.llm_models.payload_content.tool_option import ToolCall
@dataclass
class LLMGenerationDataModel(BaseDataModel):
    """Aggregated output of a single LLM generation call."""

    # Final text content returned by the model; None if generation failed.
    content: Optional[str] = None
    # Chain-of-thought / reasoning text, when the model exposes it.
    reasoning: Optional[str] = None
    # Name of the model that produced the output.
    model: Optional[str] = None
    # Tool invocations requested by the model, if any.
    tool_calls: Optional[List["ToolCall"]] = None
    # Prompt that was sent to the model (kept for debugging/replay).
    prompt: Optional[str] = None
    # Indices of expressions selected for this generation.
    selected_expressions: Optional[List[int]] = None
    # Parsed reply segments as (type, payload) pairs.
    reply_set: Optional[List[Tuple[str, Any]]] = None

View File

@@ -0,0 +1,36 @@
from typing import Optional, TYPE_CHECKING
from dataclasses import dataclass, field
from . import BaseDataModel
if TYPE_CHECKING:
from .database_data_model import DatabaseMessages
@dataclass
class MessageAndActionModel(BaseDataModel):
    """A unified row for showing chat messages and action records in one
    chronological history."""

    chat_id: str = ""
    time: float = 0.0
    user_id: str = ""
    user_platform: str = ""
    user_nickname: str = ""
    user_cardname: Optional[str] = None
    processed_plain_text: Optional[str] = None
    display_message: Optional[str] = None
    chat_info_platform: str = ""
    # True when this row represents an executed action rather than a message.
    is_action_record: bool = False
    # Name of the action, only meaningful when is_action_record is True.
    action_name: Optional[str] = None

    @classmethod
    def from_DatabaseMessages(cls, message: "DatabaseMessages"):
        """Build a row from a ``DatabaseMessages`` record.

        ``is_action_record`` keeps its False default and ``action_name``
        stays None, since the source is a chat message.
        """
        sender = message.user_info
        return cls(
            chat_id=message.chat_id,
            time=message.time,
            user_id=sender.user_id,
            user_platform=sender.platform,
            user_nickname=sender.user_nickname,
            user_cardname=sender.user_cardname,
            processed_plain_text=message.processed_plain_text,
            display_message=message.display_message,
            chat_info_platform=message.chat_info.platform,
        )

View File

@@ -387,6 +387,7 @@ class EmojiConfig(ValidatedConfigBase):
content_filtration: bool = Field(default=False, description="内容过滤")
filtration_prompt: str = Field(default="符合公序良俗", description="过滤提示")
enable_emotion_analysis: bool = Field(default=True, description="启用情感分析")
emoji_selection_mode: Literal["emotion", "description"] = Field(default="emotion", description="表情选择模式")
max_context_emojis: int = Field(default=30, description="每次随机传递给LLM的表情包最大数量0为全部")

View File

@@ -116,9 +116,9 @@ class MainSystem:
# 停止消息重组器
from src.plugin_system.core.event_manager import event_manager
from src.plugin_system import EventType
import asyncio
asyncio.run(event_manager.trigger_event(EventType.ON_STOP,permission_group="SYSTEM"))
from src.utils.message_chunker import reassembler
import asyncio
loop = asyncio.get_event_loop()
if loop.is_running():
@@ -250,6 +250,11 @@ MoFox_Bot(第三方修改版)
self.hippocampus_manager.initialize()
logger.info("记忆系统初始化成功")
# 初始化LPMM知识库
from src.chat.knowledge.knowledge_lib import initialize_lpmm_knowledge
initialize_lpmm_knowledge()
logger.info("LPMM知识库初始化成功")
# 初始化异步记忆管理器
try:
from src.chat.memory_system.async_memory_optimizer import async_memory_manager

View File

@@ -117,7 +117,7 @@ async def build_cross_context_s4u(
if not cross_context_messages:
return ""
return "# 跨上下文参考\n" + "\n\n".join(cross_context_messages) + "\n"
return "### 其他群聊中的聊天记录\n" + "\n\n".join(cross_context_messages) + "\n"
async def get_chat_history_by_group_name(group_name: str) -> str:

View File

@@ -8,6 +8,8 @@ from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import ChatStream
from src.plugin_system.base.component_types import ActionActivationType, ChatMode, ActionInfo, ComponentType, ChatType
from src.plugin_system.apis import send_api, database_api, message_api
logger = get_logger("base_action")

View File

@@ -99,10 +99,72 @@ class EmojiAction(BaseAction):
available_emotions = list(emotion_map.keys())
emoji_base64, emoji_description = "", ""
if not available_emotions:
logger.warning(f"{self.log_prefix} 获取到的表情包均无情感标签, 将随机发送")
emoji_base64, emoji_description = random.choice(all_emojis_data)
else:
# 4. 根据配置选择不同的表情选择模式
if global_config.emoji.emoji_selection_mode == "emotion":
# --- 情感标签选择模式 ---
if not available_emotions:
logger.warning(f"{self.log_prefix} 获取到的表情包均无情感标签, 将随机发送")
emoji_base64, emoji_description = random.choice(all_emojis_data)
else:
# 获取最近的5条消息内容用于判断
recent_messages = message_api.get_recent_messages(chat_id=self.chat_id, limit=5)
messages_text = ""
if recent_messages:
messages_text = message_api.build_readable_messages(
messages=recent_messages,
timestamp_mode="normal_no_YMD",
truncate=False,
show_actions=False,
)
# 构建prompt让LLM选择情感
prompt = f"""
你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个情感标签列表中选择最匹配的一个。
这是最近的聊天记录:
{messages_text}
这是理由:“{reason}
这里是可用的情感标签:{available_emotions}
请直接返回最匹配的那个情感标签,不要进行任何解释或添加其他多余的文字。
"""
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
else:
logger.debug(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
# 调用LLM
models = llm_api.get_available_models()
chat_model_config = models.get("planner")
if not chat_model_config:
logger.error(f"{self.log_prefix} 未找到'planner'模型配置无法调用LLM")
return False, "未找到'planner'模型配置"
success, chosen_emotion, _, _ = await llm_api.generate_with_model(
prompt, model_config=chat_model_config, request_type="emoji"
)
if not success:
logger.warning(f"{self.log_prefix} LLM调用失败: {chosen_emotion}, 将随机选择一个表情包")
emoji_base64, emoji_description = random.choice(all_emojis_data)
else:
chosen_emotion = chosen_emotion.strip().replace('"', "").replace("'", "")
logger.info(f"{self.log_prefix} LLM选择的情感: {chosen_emotion}")
# 使用模糊匹配来查找最相关的情感标签
matched_key = next((key for key in emotion_map if chosen_emotion in key), None)
if matched_key:
emoji_base64, emoji_description = random.choice(emotion_map[matched_key])
logger.info(f"{self.log_prefix} 找到匹配情感 '{chosen_emotion}' (匹配到: '{matched_key}') 的表情包: {emoji_description}")
else:
logger.warning(
f"{self.log_prefix} LLM选择的情感 '{chosen_emotion}' 不在可用列表中, 将随机选择一个表情包"
)
emoji_base64, emoji_description = random.choice(all_emojis_data)
elif global_config.emoji.emoji_selection_mode == "description":
# --- 详细描述选择模式 ---
# 获取最近的5条消息内容用于判断
recent_messages = message_api.get_recent_messages(chat_id=self.chat_id, limit=5)
messages_text = ""
@@ -114,51 +176,53 @@ class EmojiAction(BaseAction):
show_actions=False,
)
# 4. 构建prompt让LLM选择情感
# 准备表情描述列表
emoji_descriptions = [desc for _, desc in all_emojis_data]
# 构建prompt让LLM选择描述
prompt = f"""
你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个情感标签列表中选择最匹配的一个。
你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个表情包描述列表中选择最匹配的一个。
这是最近的聊天记录:
{messages_text}
这是理由:“{reason}
这里是可用的情感标签{available_emotions}
请直接返回最匹配的那个情感标签,不要进行任何解释或添加其他多余的文字。
这里是可用的表情包描述{emoji_descriptions}
请直接返回最匹配的那个表情包描述,不要进行任何解释或添加其他多余的文字。
"""
logger.debug(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
if global_config.debug.show_prompt:
logger.info(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
else:
logger.debug(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
# 5. 调用LLM
# 调用LLM
models = llm_api.get_available_models()
chat_model_config = models.get("planner")
if not chat_model_config:
logger.error(f"{self.log_prefix} 未找到'utils_small'模型配置无法调用LLM")
return False, "未找到'utils_small'模型配置"
logger.error(f"{self.log_prefix} 未找到'planner'模型配置无法调用LLM")
return False, "未找到'planner'模型配置"
success, chosen_emotion, _, _ = await llm_api.generate_with_model(
success, chosen_description, _, _ = await llm_api.generate_with_model(
prompt, model_config=chat_model_config, request_type="emoji"
)
if not success:
logger.warning(f"{self.log_prefix} LLM调用失败: {chosen_emotion}, 将随机选择一个表情包")
logger.warning(f"{self.log_prefix} LLM调用失败: {chosen_description}, 将随机选择一个表情包")
emoji_base64, emoji_description = random.choice(all_emojis_data)
else:
chosen_emotion = chosen_emotion.strip().replace('"', "").replace("'", "")
logger.info(f"{self.log_prefix} LLM选择的情感: {chosen_emotion}")
chosen_description = chosen_description.strip().replace('"', "").replace("'", "")
logger.info(f"{self.log_prefix} LLM选择的描述: {chosen_description}")
# 使用模糊匹配来查找最相关的情感标签
matched_key = next((key for key in emotion_map if chosen_emotion in key), None)
if matched_key:
emoji_base64, emoji_description = random.choice(emotion_map[matched_key])
logger.info(f"{self.log_prefix} 找到匹配情感 '{chosen_emotion}' (匹配到: '{matched_key}') 的表情包: {emoji_description}")
# 查找与选择的描述匹配的表情包
matched_emoji = next((item for item in all_emojis_data if item == chosen_description), None)
if matched_emoji:
emoji_base64, emoji_description = matched_emoji
logger.info(f"{self.log_prefix} 找到匹配描述 '{chosen_description}' 的表情包")
else:
logger.warning(
f"{self.log_prefix} LLM选择的情感 '{chosen_emotion}' 不在可用列表中, 将随机选择一个表情包"
f"{self.log_prefix} LLM选择的描述 '{chosen_description}' 不在可用列表中, 将随机选择一个表情包"
)
emoji_base64, emoji_description = random.choice(all_emojis_data)
else:
logger.error(f"{self.log_prefix} 无效的表情选择模式: {global_config.emoji.emoji_selection_mode}")
return False, "无效的表情选择模式"
# 7. 发送表情包
success = await self.send_emoji(emoji_base64)

View File

@@ -1,5 +1,5 @@
[inner]
version = "6.8.5"
version = "6.8.6"
#----以下是给开发人员阅读的如果你只是部署了MoFox-Bot不需要阅读----
#如果你想要修改配置文件请递增version的值
@@ -258,6 +258,10 @@ steal_emoji = true # 是否偷取表情包让MoFox-Bot可以将一些表情
content_filtration = false # 是否启用表情包过滤,只有符合该要求的表情包才会被保存
filtration_prompt = "符合公序良俗" # 表情包过滤要求,只有符合该要求的表情包才会被保存
enable_emotion_analysis = false # 是否启用表情包感情关键词二次识别,启用后表情包在第一次识别完毕后将送入第二次大模型识别来总结感情关键词,并构建进回复和决策器的上下文消息中
# 表情选择模式, 可选值为 "emotion" 或 "description"
# emotion: 让大模型从情感标签中选择
# description: 让大模型从详细描述中选择
emoji_selection_mode = "emotion"
max_context_emojis = 30 # 每次随机传递给LLM的表情包详细描述的最大数量0为全部
[memory]