diff --git a/src/chat/emoji_system/emoji_manager.py b/src/chat/emoji_system/emoji_manager.py
index d214ae21f..f1b498a22 100644
--- a/src/chat/emoji_system/emoji_manager.py
+++ b/src/chat/emoji_system/emoji_manager.py
@@ -4,6 +4,7 @@ import binascii
 import hashlib
 import io
 import json
+import json_repair
 import os
 import random
 import re
@@ -1022,6 +1023,15 @@ class EmojiManager:
 - 必须是表情包,非普通截图。
 - 图中文字不超过5个。
 请确保你的最终输出是严格的JSON对象,不要添加任何额外解释或文本。
+输出格式:
+```json
+{{
+    "detailed_description": "",
+    "keywords": [],
+    "refined_sentence": "",
+    "is_compliant": true
+}}
+```
 """
 
         image_data_for_vlm, image_format_for_vlm = image_base64, image_format
@@ -1041,16 +1051,14 @@ class EmojiManager:
                 if not vlm_response_str:
                     continue
 
-                match = re.search(r"\{.*\}", vlm_response_str, re.DOTALL)
-                if match:
-                    vlm_response_json = json.loads(match.group(0))
-                    description = vlm_response_json.get("detailed_description", "")
-                    emotions = vlm_response_json.get("keywords", [])
-                    refined_description = vlm_response_json.get("refined_sentence", "")
-                    is_compliant = vlm_response_json.get("is_compliant", False)
-                    if description and emotions and refined_description:
-                        logger.info("[VLM分析] 成功解析VLM返回的JSON数据。")
-                        break
+                vlm_response_json = self._parse_json_response(vlm_response_str)
+                description = vlm_response_json.get("detailed_description", "")
+                emotions = vlm_response_json.get("keywords", [])
+                refined_description = vlm_response_json.get("refined_sentence", "")
+                is_compliant = vlm_response_json.get("is_compliant", False)
+                if description and emotions and refined_description:
+                    logger.info("[VLM分析] 成功解析VLM返回的JSON数据。")
+                    break
                 logger.warning("[VLM分析] VLM返回的JSON数据不完整或格式错误,准备重试。")
             except (json.JSONDecodeError, AttributeError) as e:
                 logger.error(f"VLM JSON解析失败 (第 {i+1}/3 次): {e}")
@@ -1195,6 +1203,29 @@ class EmojiManager:
                 logger.error(f"[错误] 删除异常处理文件时出错: {remove_error}")
             return False
 
+    @classmethod
+    def _parse_json_response(cls, response: str) -> dict[str, Any] | None:
+        """解析 LLM 的 JSON 响应"""
+        try:
+            # 尝试提取 JSON 代码块
+            json_match = re.search(r"```json\s*(.*?)\s*```", response, re.DOTALL)
+            if json_match:
+                json_str = json_match.group(1)
+            else:
+                # 尝试直接解析
+                json_str = response.strip()
+
+            # 移除可能的注释
+            json_str = re.sub(r"//.*", "", json_str)
+            json_str = re.sub(r"/\*.*?\*/", "", json_str, flags=re.DOTALL)
+
+            data = json_repair.loads(json_str)
+            return data
+
+        except json.JSONDecodeError as e:
+            logger.warning(f"JSON 解析失败: {e}, 响应: {response[:200]}")
+            return None
+
 
 emoji_manager = None
 
diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py
index c938e8692..c052a8b00 100644
--- a/src/chat/replyer/default_generator.py
+++ b/src/chat/replyer/default_generator.py
@@ -614,7 +614,7 @@ class DefaultReplyer:
             # 使用统一管理器的智能检索(Judge模型决策)
             search_result = await unified_manager.search_memories(
                 query_text=query_text,
-                use_judge=True,
+                use_judge=global_config.memory.use_judge,
                 recent_chat_history=chat_history,  # 传递最近聊天历史
             )
 
@@ -1799,8 +1799,9 @@ class DefaultReplyer:
             )
 
             if content:
-                # 移除 [SPLIT] 标记,防止消息被分割
-                content = content.replace("[SPLIT]", "")
+                if not global_config.response_splitter.enable or global_config.response_splitter.split_mode != 'llm':
+                    # 移除 [SPLIT] 标记,防止消息被分割
+                    content = content.replace("[SPLIT]", "")
 
                 # 应用统一的格式过滤器
                 from src.chat.utils.utils import filter_system_format_content
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index d8b8d48df..510b074da 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -514,6 +514,7 @@ class MemoryConfig(ValidatedConfigBase):
     short_term_decay_factor: float = Field(default=0.98, description="衰减因子")
 
     # 长期记忆层配置
+    use_judge: bool = Field(default=True, description="使用评判模型决定是否检索长期记忆")
     long_term_batch_size: int = Field(default=10, description="批量转移大小")
     long_term_decay_factor: float = Field(default=0.95, description="衰减因子")
     long_term_auto_transfer_interval: int = Field(default=60, description="自动转移间隔(秒)")
diff --git a/src/plugins/built_in/kokoro_flow_chatter/context_builder.py b/src/plugins/built_in/kokoro_flow_chatter/context_builder.py
index f5ca00163..c7b07c9fc 100644
--- a/src/plugins/built_in/kokoro_flow_chatter/context_builder.py
+++ b/src/plugins/built_in/kokoro_flow_chatter/context_builder.py
@@ -235,7 +235,7 @@ class KFCContextBuilder:
 
             search_result = await unified_manager.search_memories(
                 query_text=query_text,
-                use_judge=True,
+                use_judge=config.memory.use_judge,
                 recent_chat_history=chat_history,
             )
 
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 073b3db0d..e8ccd092d 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -103,7 +103,7 @@ command_prefixes = ['/']
 
 [personality]
 # 建议50字以内,描述人格的核心特质
-personality_core = "是一个积极向上的女大学生"
+personality_core = "是一个积极向上的女大学生"
 # 人格的细节,描述人格的一些侧面
 personality_side = "用一句话或几句话描述人格的侧面特质"
 #アイデンティティがない 生まれないらららら
@@ -313,6 +313,7 @@ short_term_search_top_k = 5 # 搜索时返回的最大数量
 short_term_decay_factor = 0.98 # 衰减因子
 
 # 长期记忆层配置
+use_judge = true # 使用评判模型决定是否检索长期记忆
 long_term_batch_size = 10 # 批量转移大小
 long_term_decay_factor = 0.95 # 衰减因子
 long_term_auto_transfer_interval = 180 # 自动转移间隔(秒)
@@ -427,7 +428,7 @@ auto_install = true #it can work now!
 auto_install_timeout = 300
 # 是否使用PyPI镜像源(推荐,可加速下载)
 use_mirror = true
-mirror_url = "https://pypi.tuna.tsinghua.edu.cn/simple" # PyPI镜像源URL,如: "https://pypi.tuna.tsinghua.edu.cn/simple"
+mirror_url = "https://pypi.tuna.tsinghua.edu.cn/simple" # PyPI镜像源URL,如: "https://pypi.tuna.tsinghua.edu.cn/simple"
 
 # 依赖安装日志级别
 install_log_level = "INFO"
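
A note on the JSON-parsing change in emoji_manager.py: instead of pulling the first `{...}` span out of the VLM reply with a regex and handing it to `json.loads`, the new `_parse_json_response` helper unwraps a Markdown code fence if one is present, strips `//` and `/* */` comments, and then relies on `json_repair` to tolerate trailing commas, truncated objects, and similar damage. The snippet below is a minimal standalone sketch of that pipeline: the sample response string is invented for illustration, while the three clean-up steps are copied from the helper.

```python
import re

import json_repair  # pip install json-repair

# Invented example of a slightly broken VLM reply: an inline // comment,
# a trailing comma, and a missing closing brace.
sample_response = """{
    "detailed_description": "一只猫举着写字的牌子",  // 描述
    "keywords": ["可爱", "猫"],
    "refined_sentence": "举牌子的猫",
    "is_compliant": true,
"""

# Same three steps as EmojiManager._parse_json_response:
# 1) unwrap a ```json ... ``` fence if present,
# 2) strip // and /* */ comments,
# 3) let json_repair fix whatever malformed JSON remains.
match = re.search(r"```json\s*(.*?)\s*```", sample_response, re.DOTALL)
json_str = match.group(1) if match else sample_response.strip()
json_str = re.sub(r"//.*", "", json_str)
json_str = re.sub(r"/\*.*?\*/", "", json_str, flags=re.DOTALL)

data = json_repair.loads(json_str)
print(data["keywords"], data["is_compliant"])  # expected: ['可爱', '猫'] True
```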
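The `[SPLIT]` change in default_generator.py is also easy to restate: the marker is only kept when the response splitter is enabled and running in `llm` mode, since that is the only code path that consumes it; in every other case it is stripped so it cannot leak into the sent message. The helper below is purely illustrative (it is not part of the codebase), with its two parameters standing in for `global_config.response_splitter.enable` and `global_config.response_splitter.split_mode`.

```python
def handle_split_marker(content: str, splitter_enabled: bool, split_mode: str) -> str:
    """Restates the condition added in default_generator.py (illustrative only)."""
    if not splitter_enabled or split_mode != "llm":
        # [SPLIT] is only meaningful to the LLM-driven splitter; in any other
        # mode it would show up verbatim in the reply, so drop it here.
        return content.replace("[SPLIT]", "")
    return content


print(handle_split_marker("你好[SPLIT]在吗", True, "llm"))         # 你好[SPLIT]在吗
print(handle_split_marker("你好[SPLIT]在吗", True, "other_mode"))  # 你好在吗  (any non-llm mode)
print(handle_split_marker("你好[SPLIT]在吗", False, "llm"))        # 你好在吗
```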