diff --git a/bot.py b/bot.py index 51d76e642..aab5cd4f1 100644 --- a/bot.py +++ b/bot.py @@ -7,17 +7,9 @@ import time import platform import traceback from pathlib import Path -from dotenv import load_dotenv from rich.traceback import install from colorama import init, Fore -if os.path.exists(".env"): - load_dotenv(".env", override=True) - print("成功加载环境变量配置") -else: - print("未找到.env文件,请确保程序所需的环境变量被正确设置") - raise FileNotFoundError(".env 文件不存在,请创建并配置所需的环境变量") - # maim_message imports for console input # 最早期初始化日志系统,确保所有后续模块都使用正确的日志格式 @@ -45,7 +37,6 @@ logger.info(f"已设置工作目录为: {script_dir}") confirm_logger = get_logger("confirm") # 获取没有加载env时的环境变量 -env_mask = {key: os.getenv(key) for key in os.environ} uvicorn_server = None driver = None diff --git a/src/chat/chat_loop/hfc_context.py b/src/chat/chat_loop/hfc_context.py index e6a4b31f3..fe5d283ae 100644 --- a/src/chat/chat_loop/hfc_context.py +++ b/src/chat/chat_loop/hfc_context.py @@ -5,6 +5,7 @@ from src.person_info.relationship_builder_manager import RelationshipBuilder from src.chat.express.expression_learner import ExpressionLearner from src.chat.planner_actions.action_manager import ActionManager from src.chat.chat_loop.hfc_utils import CycleDetail +from src.config.config import global_config if TYPE_CHECKING: from .sleep_manager.wakeup_manager import WakeUpManager @@ -64,7 +65,8 @@ class HfcContext: self.energy_manager: Optional["EnergyManager"] = None self.sleep_manager: Optional["SleepManager"] = None - self.focus_energy = 1 + # 从聊天流获取focus_energy,如果没有则使用配置文件中的值 + self.focus_energy = getattr(self.chat_stream, "focus_energy", global_config.chat.focus_value) self.no_reply_consecutive = 0 self.total_interest = 0.0 # breaking形式下的累积兴趣值 diff --git a/src/chat/message_receive/chat_stream.py b/src/chat/message_receive/chat_stream.py index c43901eab..f5822acfb 100644 --- a/src/chat/message_receive/chat_stream.py +++ b/src/chat/message_receive/chat_stream.py @@ -83,7 +83,8 @@ class ChatStream: self.sleep_pressure = 
data.get("sleep_pressure", 0.0) if data else 0.0 self.saved = False self.context: ChatMessageContext = None # type: ignore # 用于存储该聊天的上下文信息 - self.focus_energy = 1 + # 从配置文件中读取focus_value,如果没有则使用默认值1.0 + self.focus_energy = data.get("focus_energy", global_config.chat.focus_value) if data else global_config.chat.focus_value self.no_reply_consecutive = 0 self.breaking_accumulated_interest = 0.0 @@ -98,6 +99,7 @@ class ChatStream: "last_active_time": self.last_active_time, "energy_value": self.energy_value, "sleep_pressure": self.sleep_pressure, + "focus_energy": self.focus_energy, "breaking_accumulated_interest": self.breaking_accumulated_interest, } @@ -360,6 +362,7 @@ class ChatManager: "group_name": group_info_d["group_name"] if group_info_d else "", "energy_value": s_data_dict.get("energy_value", 5.0), "sleep_pressure": s_data_dict.get("sleep_pressure", 0.0), + "focus_energy": s_data_dict.get("focus_energy", global_config.chat.focus_value), } # 根据数据库类型选择插入语句 @@ -421,6 +424,7 @@ class ChatManager: "last_active_time": model_instance.last_active_time, "energy_value": model_instance.energy_value, "sleep_pressure": model_instance.sleep_pressure, + "focus_energy": getattr(model_instance, "focus_energy", global_config.chat.focus_value), } loaded_streams_data.append(data_for_from_dict) session.commit() diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index cd9cd23cd..b449641f4 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -140,8 +140,6 @@ def init_prompt(): -------------------------------- {time_block} -{reply_target_block} - 注意不要复读你前面发过的内容,意思相近也不行。 请注意不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。只输出回复内容。 @@ -833,16 +831,22 @@ class DefaultReplyer: reply_message.get("user_id"), # type: ignore ) person_name = await person_info_manager.get_value(person_id, "person_name") - sender = person_name + + # 检查是否是bot自己的名字,如果是则替换为"(你)" + bot_user_id = str(global_config.bot.qq_account) + current_user_id = 
person_info_manager.get_value_sync(person_id, "user_id") + current_platform = reply_message.get("chat_info_platform") + + if current_user_id == bot_user_id and current_platform == global_config.bot.platform: + sender = f"{person_name}(你)" + else: + # 如果不是bot自己,直接使用person_name + sender = person_name target = reply_message.get("processed_plain_text") person_info_manager = get_person_info_manager() person_id = person_info_manager.get_person_id_by_person_name(sender) - user_id = person_info_manager.get_value_sync(person_id, "user_id") platform = chat_stream.platform - if user_id == global_config.bot.qq_account and platform == global_config.bot.platform: - logger.warning("选取了自身作为回复对象,跳过构建prompt") - return "" target = replace_user_references_sync(target, chat_stream.platform, replace_bot_name=True) @@ -1061,10 +1065,8 @@ class DefaultReplyer: **任务**: 请结合你的智慧和人设,自然地决定是否需要分段。如果需要,请在最恰当的位置插入 `[SPLIT]` 标记。 """ - # 在 "现在,你说:" 之前插入 - parts = prompt_text.rsplit("现在,你说:", 1) - if len(parts) == 2: - prompt_text = f"{parts[0]}{split_instruction}\n现在,你说:{parts[1]}" + # 将分段指令添加到提示词顶部 + prompt_text = f"{split_instruction}\n{prompt_text}" return prompt_text diff --git a/src/chat/utils/prompt.py b/src/chat/utils/prompt.py index b5cf140c5..ae0c9c4b1 100644 --- a/src/chat/utils/prompt.py +++ b/src/chat/utils/prompt.py @@ -312,16 +312,15 @@ class Prompt: except asyncio.TimeoutError as e: logger.error(f"构建Prompt超时: {e}") - raise TimeoutError(f"构建Prompt超时: {e}") + raise TimeoutError(f"构建Prompt超时: {e}") from e except Exception as e: logger.error(f"构建Prompt失败: {e}") - raise RuntimeError(f"构建Prompt失败: {e}") + raise RuntimeError(f"构建Prompt失败: {e}") from e async def _build_context_data(self) -> Dict[str, Any]: """构建智能上下文数据""" # 并行执行所有构建任务 start_time = time.time() - timing_logs = {} try: # 准备构建任务 @@ -381,7 +380,6 @@ class Prompt: results = [] for i in range(0, len(tasks), max_concurrent_tasks): batch_tasks = tasks[i : i + max_concurrent_tasks] - batch_names = task_names[i : i + 
max_concurrent_tasks] batch_results = await asyncio.wait_for( asyncio.gather(*batch_tasks, return_exceptions=True), timeout=timeout_seconds @@ -520,13 +518,99 @@ class Prompt: async def _build_expression_habits(self) -> Dict[str, Any]: """构建表达习惯""" - # 简化的实现,完整实现需要导入相关模块 - return {"expression_habits_block": ""} + if not global_config.expression.enable_expression: + return {"expression_habits_block": ""} + + try: + from src.chat.express.expression_selector import ExpressionSelector + + # 获取聊天历史用于表情选择 + chat_history = "" + if self.parameters.message_list_before_now_long: + recent_messages = self.parameters.message_list_before_now_long[-10:] + chat_history = build_readable_messages( + recent_messages, + replace_bot_name=True, + timestamp_mode="normal", + truncate=True + ) + + # 创建表情选择器 + expression_selector = ExpressionSelector(self.parameters.chat_id) + + # 选择合适的表情 + selected_expressions = await expression_selector.select_suitable_expressions_llm( + chat_history=chat_history, + current_message=self.parameters.target, + emotional_tone="neutral", + topic_type="general" + ) + + # 构建表达习惯块 + if selected_expressions: + style_habits_str = "\n".join([f"- {expr}" for expr in selected_expressions]) + expression_habits_block = f"你可以参考以下的语言习惯,当情景合适就使用,但不要生硬使用,以合理的方式结合到你的回复中:\n{style_habits_str}" + else: + expression_habits_block = "" + + return {"expression_habits_block": expression_habits_block} + + except Exception as e: + logger.error(f"构建表达习惯失败: {e}") + return {"expression_habits_block": ""} async def _build_memory_block(self) -> Dict[str, Any]: """构建记忆块""" - # 简化的实现 - return {"memory_block": ""} + if not global_config.memory.enable_memory: + return {"memory_block": ""} + + try: + from src.chat.memory_system.memory_activator import MemoryActivator + from src.chat.memory_system.async_instant_memory_wrapper import async_memory + + # 获取聊天历史 + chat_history = "" + if self.parameters.message_list_before_now_long: + recent_messages = 
self.parameters.message_list_before_now_long[-20:] + chat_history = build_readable_messages( + recent_messages, + replace_bot_name=True, + timestamp_mode="normal", + truncate=True + ) + + # 激活长期记忆 + memory_activator = MemoryActivator() + running_memories = await memory_activator.activate_memory_with_chat_history( + chat_history=chat_history, + target_user=self.parameters.sender, + chat_id=self.parameters.chat_id + ) + + # 获取即时记忆 + instant_memory = await async_memory.get_memory_with_fallback( + chat_id=self.parameters.chat_id, + target_user=self.parameters.sender + ) + + # 构建记忆块 + memory_parts = [] + + if running_memories: + memory_parts.append("以下是当前在聊天中,你回忆起的记忆:") + for memory in running_memories: + memory_parts.append(f"- {memory['content']}") + + if instant_memory: + memory_parts.append(f"- {instant_memory}") + + memory_block = "\n".join(memory_parts) if memory_parts else "" + + return {"memory_block": memory_block} + + except Exception as e: + logger.error(f"构建记忆块失败: {e}") + return {"memory_block": ""} async def _build_relation_info(self) -> Dict[str, Any]: """构建关系信息""" @@ -539,13 +623,106 @@ class Prompt: async def _build_tool_info(self) -> Dict[str, Any]: """构建工具信息""" - # 简化的实现 - return {"tool_info_block": ""} + if not global_config.tool.enable_tool: + return {"tool_info_block": ""} + + try: + from src.plugin_system.core.tool_use import ToolExecutor + + # 获取聊天历史 + chat_history = "" + if self.parameters.message_list_before_now_long: + recent_messages = self.parameters.message_list_before_now_long[-15:] + chat_history = build_readable_messages( + recent_messages, + replace_bot_name=True, + timestamp_mode="normal", + truncate=True + ) + + # 创建工具执行器 + tool_executor = ToolExecutor() + + # 执行工具获取信息 + tool_results, _, _ = await tool_executor.execute_from_chat_message( + sender=self.parameters.sender, + target_message=self.parameters.target, + chat_history=chat_history, + return_details=False + ) + + # 构建工具信息块 + if tool_results: + tool_info_parts = 
["以下是你通过工具获取到的实时信息:"] + for tool_result in tool_results: + tool_name = tool_result.get("tool_name", "unknown") + content = tool_result.get("content", "") + result_type = tool_result.get("type", "tool_result") + + tool_info_parts.append(f"- 【{tool_name}】{result_type}: {content}") + + tool_info_parts.append("以上是你获取到的实时信息,请在回复时参考这些信息。") + tool_info_block = "\n".join(tool_info_parts) + else: + tool_info_block = "" + + return {"tool_info_block": tool_info_block} + + except Exception as e: + logger.error(f"构建工具信息失败: {e}") + return {"tool_info_block": ""} async def _build_knowledge_info(self) -> Dict[str, Any]: """构建知识信息""" - # 简化的实现 - return {"knowledge_prompt": ""} + if not global_config.lpmm_knowledge.enable: + return {"knowledge_prompt": ""} + + try: + from src.chat.knowledge.knowledge_lib import QAManager + + # 获取问题文本(当前消息) + question = self.parameters.target or "" + if not question: + return {"knowledge_prompt": ""} + + # 创建QA管理器 + qa_manager = QAManager() + + # 搜索相关知识 + knowledge_results = await qa_manager.get_knowledge( + question=question, + chat_id=self.parameters.chat_id, + max_results=5, + min_similarity=0.5 + ) + + # 构建知识块 + if knowledge_results and knowledge_results.get("knowledge_items"): + knowledge_parts = ["以下是与你当前对话相关的知识信息:"] + + for item in knowledge_results["knowledge_items"]: + content = item.get("content", "") + source = item.get("source", "") + relevance = item.get("relevance", 0.0) + + if content: + if source: + knowledge_parts.append(f"- [{relevance:.2f}] {content} (来源: {source})") + else: + knowledge_parts.append(f"- [{relevance:.2f}] {content}") + + if knowledge_results.get("summary"): + knowledge_parts.append(f"\n知识总结: {knowledge_results['summary']}") + + knowledge_prompt = "\n".join(knowledge_parts) + else: + knowledge_prompt = "" + + return {"knowledge_prompt": knowledge_prompt} + + except Exception as e: + logger.error(f"构建知识信息失败: {e}") + return {"knowledge_prompt": ""} async def _build_cross_context(self) -> Dict[str, Any]: """构建跨群上下文""" 
diff --git a/src/common/message/api.py b/src/common/message/api.py index eed85c0a9..a85677f47 100644 --- a/src/common/message/api.py +++ b/src/common/message/api.py @@ -24,8 +24,8 @@ def get_global_api() -> MessageServer: # sourcery skip: extract-method # 设置基本参数 kwargs = { - "host": os.environ["HOST"], - "port": int(os.environ["PORT"]), + "host": global_config.server.host, + "port": int(global_config.server.port), "app": get_global_server().get_app(), } diff --git a/src/common/server.py b/src/common/server.py index 24311e54d..30c55d72d 100644 --- a/src/common/server.py +++ b/src/common/server.py @@ -2,7 +2,7 @@ from fastapi import FastAPI, APIRouter from fastapi.middleware.cors import CORSMiddleware # 新增导入 from typing import Optional from uvicorn import Config, Server as UvicornServer -import os +from src.config.config import global_config from rich.traceback import install install(extra_lines=3) @@ -98,5 +98,5 @@ def get_global_server() -> Server: """获取全局服务器实例""" global global_server if global_server is None: - global_server = Server(host=os.environ["HOST"], port=int(os.environ["PORT"])) + global_server = Server(host=global_config.server.host,port=int(global_config.server.port),) return global_server diff --git a/src/config/config.py b/src/config/config.py index ef2d413dd..a38122300 100644 --- a/src/config/config.py +++ b/src/config/config.py @@ -43,8 +43,8 @@ from src.config.official_configs import ( CrossContextConfig, PermissionConfig, CommandConfig, - MaizoneIntercomConfig, PlanningSystemConfig, + ServerConfig, ) from .api_ada_configs import ( @@ -399,9 +399,7 @@ class Config(ValidatedConfigBase): cross_context: CrossContextConfig = Field( default_factory=lambda: CrossContextConfig(), description="跨群聊上下文共享配置" ) - maizone_intercom: MaizoneIntercomConfig = Field( - default_factory=lambda: MaizoneIntercomConfig(), description="Maizone互通组配置" - ) + server: ServerConfig = Field(default_factory=lambda: ServerConfig(), description="主服务器配置") class 
APIAdapterConfig(ValidatedConfigBase): diff --git a/src/config/official_configs.py b/src/config/official_configs.py index 346217342..9253c92cb 100644 --- a/src/config/official_configs.py +++ b/src/config/official_configs.py @@ -496,6 +496,13 @@ class ExperimentalConfig(ValidatedConfigBase): pfc_chatting: bool = Field(default=False, description="启用PFC聊天") +class ServerConfig(ValidatedConfigBase): + """主服务器配置类""" + + host: str = Field(default="127.0.0.1", description="主服务器监听地址") + port: int = Field(default=8080, description="主服务器监听端口") + + class MaimMessageConfig(ValidatedConfigBase): """maim_message配置类""" @@ -678,15 +685,6 @@ class CrossContextConfig(ValidatedConfigBase): enable: bool = Field(default=False, description="是否启用跨群聊上下文共享功能") groups: List[ContextGroup] = Field(default_factory=list, description="上下文共享组列表") - - -class MaizoneIntercomConfig(ValidatedConfigBase): - """Maizone互通组配置""" - - enable: bool = Field(default=False, description="是否启用Maizone互通组功能") - groups: List[ContextGroup] = Field(default_factory=list, description="Maizone互通组列表") - - class CommandConfig(ValidatedConfigBase): """命令系统配置类""" diff --git a/src/plugins/built_in/core_actions/emoji.py b/src/plugins/built_in/core_actions/emoji.py index b3f410a4b..0e3305e6e 100644 --- a/src/plugins/built_in/core_actions/emoji.py +++ b/src/plugins/built_in/core_actions/emoji.py @@ -1,7 +1,5 @@ import random from typing import Tuple -from collections import deque -import json # 导入新插件系统 from src.plugin_system import BaseAction, ActionActivationType, ChatMode @@ -22,7 +20,6 @@ logger = get_logger("emoji") class EmojiAction(BaseAction): """表情动作 - 发送表情包""" - # --- 类级别属性 --- # 激活设置 if global_config.emoji.emoji_activate_type == "llm": activation_type = ActionActivationType.LLM_JUDGE @@ -36,9 +33,6 @@ class EmojiAction(BaseAction): # 动作基本信息 action_name = "emoji" action_description = "发送表情包辅助表达情绪" - - # 最近发送表情的历史记录 - _sent_emoji_history = deque(maxlen=4) # LLM判断提示词 llm_judge_prompt = """ @@ -80,99 +74,102 @@ class 
EmojiAction(BaseAction): logger.warning(f"{self.log_prefix} 无法获取任何带有描述的有效表情包") return False, "无法获取任何带有描述的有效表情包" - # 3. 根据新配置项决定抽样数量 - sample_size = global_config.emoji.max_context_emojis - if sample_size > 0 and len(all_emojis_obj) > sample_size: - sampled_emojis = random.sample(all_emojis_obj, sample_size) - else: - sampled_emojis = all_emojis_obj # 0表示全部 - - # 4. 为抽样的表情包创建带编号的描述列表 - prompt_emoji_list = [] - for i, emoji in enumerate(sampled_emojis): - prompt_emoji_list.append(f"{i + 1}. {emoji.description}") + # 3. 准备情感数据和后备列表 + emotion_map = {} + all_emojis_data = [] - prompt_emoji_str = "\n".join(prompt_emoji_list) - chosen_emoji_obj: MaiEmoji = None + for emoji in all_emojis_obj: + b64 = image_path_to_base64(emoji.full_path) + if not b64: + continue + + desc = emoji.description + emotions = emoji.emotion + all_emojis_data.append((b64, desc)) - # 5. 获取最近的5条消息内容用于判断 - recent_messages = message_api.get_recent_messages(chat_id=self.chat_id, limit=5) - messages_text = "" - if recent_messages: - messages_text = message_api.build_readable_messages( - messages=recent_messages, - timestamp_mode="normal_no_YMD", - truncate=False, - show_actions=False, + for emo in emotions: + if emo not in emotion_map: + emotion_map[emo] = [] + emotion_map[emo].append((b64, desc)) + + if not all_emojis_data: + logger.warning(f"{self.log_prefix} 无法加载任何有效的表情包数据") + return False, "无法加载任何有效的表情包数据" + + available_emotions = list(emotion_map.keys()) + emoji_base64, emoji_description = "", "" + + if not available_emotions: + logger.warning(f"{self.log_prefix} 获取到的表情包均无情感标签, 将随机发送") + emoji_base64, emoji_description = random.choice(all_emojis_data) + else: + # 获取最近的5条消息内容用于判断 + recent_messages = message_api.get_recent_messages(chat_id=self.chat_id, limit=5) + messages_text = "" + if recent_messages: + messages_text = message_api.build_readable_messages( + messages=recent_messages, + timestamp_mode="normal_no_YMD", + truncate=False, + show_actions=False, + ) + + # 4. 
构建prompt让LLM选择情感 + prompt = f""" + 你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个情感标签列表中选择最匹配的一个。 + 这是最近的聊天记录: + {messages_text} + + 这是理由:“{reason}” + 这里是可用的情感标签:{available_emotions} + 请直接返回最匹配的那个情感标签,不要进行任何解释或添加其他多余的文字。 + """ + + if global_config.debug.show_prompt: + logger.info(f"{self.log_prefix} 生成的LLM Prompt: {prompt}") + else: + logger.debug(f"{self.log_prefix} 生成的LLM Prompt: {prompt}") + + # 5. 调用LLM + models = llm_api.get_available_models() + chat_model_config = models.get("planner") + if not chat_model_config: + logger.error(f"{self.log_prefix} 未找到'planner'模型配置,无法调用LLM") + return False, "未找到'planner'模型配置" + + success, chosen_emotion, _, _ = await llm_api.generate_with_model( + prompt, model_config=chat_model_config, request_type="emoji" ) - # 6. 构建prompt让LLM选择编号 - prompt = f""" - 你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个带编号的表情包描述列表中选择最匹配的 **3个** 表情包,并按匹配度从高到低返回它们的编号。 - 这是最近的聊天记录: - {messages_text} - - 这是理由:“{reason}” - 这里是可用的表情包详细描述列表: - {prompt_emoji_str} - 请直接返回一个包含3个最匹配表情包编号的有序JSON列表,例如:[10, 2, 5],不要进行任何解释或添加其他多余的文字。 - """ - - # 7. 
调用LLM - models = llm_api.get_available_models() - chat_model_config = models.get("planner") - if not chat_model_config: - logger.error(f"{self.log_prefix} 未找到 'planner' 模型配置,无法调用LLM") - return False, "未找到 'planner' 模型配置" - - success, chosen_indices_str, _, _ = await llm_api.generate_with_model( - prompt, model_config=chat_model_config, request_type="emoji_selection" - ) - - selected_emoji_obj = None - if success: - try: - chosen_indices = json.loads(chosen_indices_str) - if isinstance(chosen_indices, list): - logger.info(f"{self.log_prefix} LLM选择的表情编号候选项: {chosen_indices}") - for index in chosen_indices: - if isinstance(index, int) and 1 <= index <= len(sampled_emojis): - candidate_emoji = sampled_emojis[index - 1] - if candidate_emoji.hash not in self._sent_emoji_history: - selected_emoji_obj = candidate_emoji - break - else: - logger.warning(f"{self.log_prefix} LLM返回的不是一个列表: {chosen_indices_str}") - except (json.JSONDecodeError, TypeError): - logger.warning(f"{self.log_prefix} 解析LLM返回的编号列表失败: {chosen_indices_str}") - - if selected_emoji_obj: - chosen_emoji_obj = selected_emoji_obj - logger.info(f"{self.log_prefix} 从候选项中选择表情: {chosen_emoji_obj.description}") - else: if not success: - logger.warning(f"{self.log_prefix} LLM调用失败, 将随机选择一个表情包") + logger.warning(f"{self.log_prefix} LLM调用失败: {chosen_emotion}, 将随机选择一个表情包") + emoji_base64, emoji_description = random.choice(all_emojis_data) else: - logger.warning(f"{self.log_prefix} 所有候选项均在最近发送历史中, 将随机选择") - - selectable_emojis = [e for e in all_emojis_obj if e.hash not in self._sent_emoji_history] - if not selectable_emojis: - selectable_emojis = all_emojis_obj - chosen_emoji_obj = random.choice(selectable_emojis) + chosen_emotion = chosen_emotion.strip().replace('"', "").replace("'", "") + logger.info(f"{self.log_prefix} LLM选择的情感: {chosen_emotion}") - # 8. 
发送表情包并更新历史记录 - if chosen_emoji_obj: - emoji_base64 = image_path_to_base64(chosen_emoji_obj.full_path) - if emoji_base64: - send_success = await self.send_emoji(emoji_base64) - if send_success: - self._sent_emoji_history.append(chosen_emoji_obj.hash) - logger.info(f"{self.log_prefix} 表情包发送成功: {chosen_emoji_obj.description}") - logger.debug(f"{self.log_prefix} 最近表情历史: {list(self._sent_emoji_history)}") - return True, f"发送表情包: {chosen_emoji_obj.description}" + # 使用模糊匹配来查找最相关的情感标签 + matched_key = next((key for key in emotion_map if chosen_emotion in key), None) + + if matched_key: + emoji_base64, emoji_description = random.choice(emotion_map[matched_key]) + logger.info(f"{self.log_prefix} 找到匹配情感 '{chosen_emotion}' (匹配到: '{matched_key}') 的表情包: {emoji_description}") + else: + logger.warning( + f"{self.log_prefix} LLM选择的情感 '{chosen_emotion}' 不在可用列表中, 将随机选择一个表情包" + ) + emoji_base64, emoji_description = random.choice(all_emojis_data) - logger.error(f"{self.log_prefix} 表情包发送失败") - return False, "表情包发送失败" + # 7. 
发送表情包 + success = await self.send_emoji(emoji_base64) + + if not success: + logger.error(f"{self.log_prefix} 表情包发送失败") + await self.store_action_info(action_build_into_prompt = True,action_prompt_display =f"发送了一个{emoji_description}的表情包,但失败了",action_done= False) + return False, "表情包发送失败" + await self.store_action_info(action_build_into_prompt = True,action_prompt_display =f"发送了一个{emoji_description}的表情包",action_done= True) + + return True, f"发送表情包: {emoji_description}" except Exception as e: logger.error(f"{self.log_prefix} 表情动作执行失败: {e}", exc_info=True) diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml index 78e4bd5f7..4df8d0a71 100644 --- a/template/bot_config_template.toml +++ b/template/bot_config_template.toml @@ -493,6 +493,9 @@ insomnia_duration_minutes = [30, 60] # 单次失眠状态的持续时间范围 # 入睡后,经过一段延迟后触发失眠判定的延迟时间(分钟),设置为范围以增加随机性 insomnia_trigger_delay_minutes = [15, 45] +[server] +host = "127.0.0.1" +port = 8080 [cross_context] # 跨群聊/私聊上下文共享配置 # 这是总开关,用于一键启用或禁用此功能