tcmofashi
2025-07-04 09:32:34 +08:00
60 changed files with 2314 additions and 1586 deletions

.gitignore (vendored)

@@ -9,6 +9,7 @@ tool_call_benchmark.py
 run_maibot_core.bat
 run_napcat_adapter.bat
 run_ad.bat
+s4u.s4u
 llm_tool_benchmark_results.json
 MaiBot-Napcat-Adapter-main
 MaiBot-Napcat-Adapter

Dockerfile

@@ -1,4 +1,4 @@
-FROM python:3.13.2-slim-bookworm
+FROM python:3.13.5-slim-bookworm
 COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
 # Working directory

bot.py

@@ -314,10 +314,17 @@ if __name__ == "__main__":
         # Schedule tasks returns a future that runs forever.
         # We can run console_input_loop concurrently.
         main_tasks = loop.create_task(main_system.schedule_tasks())
-        console_task = loop.create_task(console_input_loop(main_system))
-        # Wait for all tasks to complete (which they won't, normally)
-        loop.run_until_complete(asyncio.gather(main_tasks, console_task))
+        # Enable console_input_loop only when running in a TTY
+        if sys.stdin.isatty():
+            logger.info("Terminal detected; enabling the console input loop")
+            console_task = loop.create_task(console_input_loop(main_system))
+            # Wait for all tasks to complete (which they won't, normally)
+            loop.run_until_complete(asyncio.gather(main_tasks, console_task))
+        else:
+            logger.info("Non-terminal environment; skipping the console input loop")
+            # Wait for all tasks to complete (which they won't, normally)
+            loop.run_until_complete(main_tasks)
 except KeyboardInterrupt:
     # loop.run_until_complete(get_global_api().stop())
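For reference, the gating pattern in isolation — a minimal runnable sketch, where schedule_tasks and console_input_loop are simplified stand-ins for the real objects in bot.py:

import asyncio
import sys

async def schedule_tasks():
    # Stand-in for main_system.schedule_tasks(); the real one runs forever.
    await asyncio.sleep(3600)

async def console_input_loop():
    # Stand-in for the real console loop: read stdin lines off-thread.
    loop = asyncio.get_running_loop()
    while await loop.run_in_executor(None, sys.stdin.readline):
        pass

loop = asyncio.new_event_loop()
main_tasks = loop.create_task(schedule_tasks())
if sys.stdin.isatty():
    # Interactive terminal: run the console loop alongside the main tasks.
    console_task = loop.create_task(console_input_loop())
    loop.run_until_complete(asyncio.gather(main_tasks, console_task))
else:
    # No TTY (e.g. running detached under docker compose): skip the console loop.
    loop.run_until_complete(main_tasks)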

docker-compose.yml

@@ -1,22 +1,29 @@
 services:
   adapters:
     container_name: maim-bot-adapters
+    #### prod ####
     image: unclas/maimbot-adapter:latest
     # image: infinitycat/maimbot-adapter:latest
+    #### dev ####
+    # image: unclas/maimbot-adapter:dev
+    # image: infinitycat/maimbot-adapter:dev
     environment:
       - TZ=Asia/Shanghai
     # ports:
     #   - "8095:8095"
     volumes:
-      - ./docker-config/adapters/config.toml:/adapters/config.toml
+      - ./docker-config/adapters/config.toml:/adapters/config.toml # persist the adapters config file
+      - ./data/adapters:/adapters/data # persist adapters data
     restart: always
     networks:
       - maim_bot
   core:
     container_name: maim-bot-core
+    #### prod ####
     image: sengokucola/maibot:latest
     # image: infinitycat/maibot:latest
-    # dev
+    #### dev ####
     # image: sengokucola/maibot:dev
     # image: infinitycat/maibot:dev
     environment:
@@ -25,15 +32,15 @@ services:
       # - PRIVACY_AGREE=42dddb3cbe2b784b45a2781407b298a1 # agree to the EULA
     # ports:
     #   - "8000:8000"
-    #   - "27017:27017"
     volumes:
       - ./docker-config/mmc/.env:/MaiMBot/.env # persist the env config file
       - ./docker-config/mmc:/MaiMBot/config # persist the bot config files
       - ./data/MaiMBot/maibot_statistics.html:/MaiMBot/maibot_statistics.html # statistics output
-      - ./data/MaiMBot:/MaiMBot/data # NapCat and NoneBot share this volume, otherwise sending images breaks
+      - ./data/MaiMBot:/MaiMBot/data # shared directory
     restart: always
     networks:
      - maim_bot
   napcat:
     environment:
       - NAPCAT_UID=1000
@@ -43,13 +50,14 @@ services:
       - "6099:6099"
     volumes:
       - ./docker-config/napcat:/app/napcat/config # persist the napcat config files
-      - ./data/qq:/app/.config/QQ # persist the QQ client and sync QQ stickers/images to the adapters
-      - ./data/MaiMBot:/MaiMBot/data # NapCat and NoneBot share this volume, otherwise sending images breaks
+      - ./data/qq:/app/.config/QQ # persist the QQ client
+      - ./data/MaiMBot:/MaiMBot/data # shared directory
     container_name: maim-bot-napcat
     restart: always
     image: mlikiowa/napcat-docker:latest
     networks:
       - maim_bot
   sqlite-web:
     image: coleifer/sqlite-web
     container_name: sqlite-web
@@ -62,6 +70,7 @@ services:
       - SQLITE_DATABASE=MaiMBot/MaiBot.db # your database file
     networks:
       - maim_bot
 networks:
   maim_bot:
     driver: bridge

s4u.s4u (new, empty file)


@@ -109,3 +109,4 @@ async def get_system_basic_info():
 def start_api_server():
     """Start the API server"""
     get_global_server().register_router(router, prefix="/api/v1")
+    # pass

src/audio/mock_audio.py (new file)

@@ -0,0 +1,62 @@
import asyncio

from src.common.logger import get_logger

logger = get_logger("MockAudio")


class MockAudioPlayer:
    """
    A mock audio player that simulates playback time based on the "length" of the audio data.
    """

    def __init__(self, audio_data: bytes):
        self._audio_data = audio_data
        # Simulated duration: assume every 1024 bytes represent 0.5 seconds of audio
        self._duration = (len(audio_data) / 1024.0) * 0.5

    async def play(self):
        """Simulate audio playback. The process can be interrupted."""
        if self._duration <= 0:
            return
        logger.info(f"Starting mock audio playback, estimated duration: {self._duration:.2f} s...")
        try:
            await asyncio.sleep(self._duration)
            logger.info("Mock audio playback finished.")
        except asyncio.CancelledError:
            logger.info("Audio playback was interrupted.")
            raise  # Re-raise so that upstream logic can catch it


class MockAudioGenerator:
    """
    A mock text-to-speech (TTS) generator.
    """

    def __init__(self):
        # Simulated generation speed: characters generated per second
        self.chars_per_second = 25.0

    async def generate(self, text: str) -> bytes:
        """
        Simulate generating audio data from text. The process can be interrupted.

        Args:
            text: the text to convert to audio.

        Returns:
            The simulated audio data (bytes).
        """
        if not text:
            return b""
        generation_time = len(text) / self.chars_per_second
        logger.info(f"Generating mock audio... text length: {len(text)}, estimated time: {generation_time:.2f} s...")
        try:
            await asyncio.sleep(generation_time)
            # Produce dummy audio data whose length is proportional to the text length
            mock_audio_data = b"\x01\x02\x03" * (len(text) * 40)
            logger.info(f"Mock audio generated, data size: {len(mock_audio_data) / 1024:.2f} KB.")
            return mock_audio_data
        except asyncio.CancelledError:
            logger.info("Audio generation was interrupted.")
            raise  # Re-raise the exception
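A small usage sketch for these mocks; the cancellation timing is illustrative:

import asyncio

async def demo():
    generator = MockAudioGenerator()
    # 50 characters at 25 chars/s -> about 2 s of simulated generation time
    audio = await generator.generate("x" * 50)
    player = MockAudioPlayer(audio)
    play_task = asyncio.create_task(player.play())
    await asyncio.sleep(0.5)
    play_task.cancel()  # interrupt playback part-way through
    try:
        await play_task
    except asyncio.CancelledError:
        pass  # play() re-raises CancelledError; swallow it at the top level

asyncio.run(demo())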


@@ -80,14 +80,16 @@ class ExpressionSelector:
     )

     def get_random_expressions(
-        self, chat_id: str, style_num: int, grammar_num: int, personality_num: int
+        self, chat_id: str, total_num: int, style_percentage: float, grammar_percentage: float
     ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
         (
             learnt_style_expressions,
             learnt_grammar_expressions,
-            personality_expressions,
         ) = self.expression_learner.get_expression_by_chat_id(chat_id)
+        style_num = int(total_num * style_percentage)
+        grammar_num = int(total_num * grammar_percentage)

         # Weighted sampling (use count as the weight)
         if learnt_style_expressions:
             style_weights = [expr.get("count", 1) for expr in learnt_style_expressions]
@@ -101,13 +103,7 @@ class ExpressionSelector:
         else:
             selected_grammar = []

-        if personality_expressions:
-            personality_weights = [expr.get("count", 1) for expr in personality_expressions]
-            selected_personality = weighted_sample(personality_expressions, personality_weights, personality_num)
-        else:
-            selected_personality = []
-
-        return selected_style, selected_grammar, selected_personality
+        return selected_style, selected_grammar

     def update_expressions_count_batch(self, expressions_to_update: List[Dict[str, str]], increment: float = 0.1):
         """Update count for a batch of expressions (grouped by file, written in one pass)"""
@@ -174,7 +170,7 @@ class ExpressionSelector:
         """Use the LLM to select suitable expressions"""

         # 1. Fetch 35 random expressions (now sampled by weight)
-        style_exprs, grammar_exprs, personality_exprs = self.get_random_expressions(chat_id, 25, 25, 10)
+        style_exprs, grammar_exprs = self.get_random_expressions(chat_id, 50, 0.5, 0.5)

         # 2. Build the index and situation list for all expressions
         all_expressions = []
@@ -196,14 +192,6 @@ class ExpressionSelector:
                 all_expressions.append(expr_with_type)
                 all_situations.append(f"{len(all_expressions)}.{expr['situation']}")

-        # Add the personality expressions
-        for expr in personality_exprs:
-            if isinstance(expr, dict) and "situation" in expr and "style" in expr:
-                expr_with_type = expr.copy()
-                expr_with_type["type"] = "style_personality"
-                all_expressions.append(expr_with_type)
-                all_situations.append(f"{len(all_expressions)}.{expr['situation']}")
-
         if not all_expressions:
             logger.warning("No usable expressions found")
             return []
@@ -260,7 +248,7 @@ class ExpressionSelector:
         # Update count for all selected expressions in one batch
         if valid_expressions:
-            self.update_expressions_count_batch(valid_expressions, 0.003)
+            self.update_expressions_count_batch(valid_expressions, 0.006)
             # logger.info(f"LLM selected {len(valid_expressions)} of {len(all_expressions)} situations")

         return valid_expressions
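As an aside, the signature change shifts call sites from absolute per-category counts to one budget split by percentages; a runnable sketch of the new arithmetic, with the old and new calls shown as comments (selector/chat_id are hypothetical names):

total_num, style_percentage, grammar_percentage = 50, 0.5, 0.5
style_num = int(total_num * style_percentage)      # 25 style candidates
grammar_num = int(total_num * grammar_percentage)  # 25 grammar candidates

# Old call: three absolute counts, three return values:
#   style, grammar, personality = selector.get_random_expressions(chat_id, 25, 25, 10)
# New call: one total budget plus percentages, two return values:
#   style, grammar = selector.get_random_expressions(chat_id, 50, 0.5, 0.5)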


@@ -74,16 +74,13 @@ class ExpressionLearner:
         )
         self.llm_model = None

-    def get_expression_by_chat_id(
-        self, chat_id: str
-    ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
+    def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
         """
-        Get the style and grammar expressions for the given chat_id, plus the global personality expressions
+        Get the style and grammar expressions for the given chat_id
         Every returned expression dict includes a source_id for later update operations
         """
         learnt_style_expressions = []
         learnt_grammar_expressions = []
-        personality_expressions = []

         # Fetch the style expressions
         style_dir = os.path.join("data", "expression", "learnt_style", str(chat_id))
@@ -111,19 +108,7 @@ class ExpressionLearner:
             except Exception as e:
                 logger.error(f"Failed to read grammar expressions: {e}")

-        # Fetch the personality expressions
-        personality_file = os.path.join("data", "expression", "personality", "expressions.json")
-        if os.path.exists(personality_file):
-            try:
-                with open(personality_file, "r", encoding="utf-8") as f:
-                    expressions = json.load(f)
-                for expr in expressions:
-                    expr["source_id"] = "personality"  # add the source ID
-                    personality_expressions.append(expr)
-            except Exception as e:
-                logger.error(f"Failed to read personality expressions: {e}")
-
-        return learnt_style_expressions, learnt_grammar_expressions, personality_expressions
+        return learnt_style_expressions, learnt_grammar_expressions

     def is_similar(self, s1: str, s2: str) -> bool:
         """
@@ -428,6 +413,7 @@ class ExpressionLearner:
 init_prompt()

 expression_learner = None


@@ -25,7 +25,6 @@ class CycleDetail:
         self.loop_processor_info: Dict[str, Any] = {}  # pre-processor info
         self.loop_plan_info: Dict[str, Any] = {}
         self.loop_action_info: Dict[str, Any] = {}
-        self.loop_post_processor_info: Dict[str, Any] = {}  # post-processor info

     def to_dict(self) -> Dict[str, Any]:
         """Convert the cycle info into dict form"""
@@ -80,7 +79,6 @@ class CycleDetail:
             "loop_processor_info": convert_to_serializable(self.loop_processor_info),
             "loop_plan_info": convert_to_serializable(self.loop_plan_info),
             "loop_action_info": convert_to_serializable(self.loop_action_info),
-            "loop_post_processor_info": convert_to_serializable(self.loop_post_processor_info),
         }

     def complete_cycle(self):
@@ -135,4 +133,3 @@ class CycleDetail:
         self.loop_processor_info = loop_info["loop_processor_info"]
         self.loop_plan_info = loop_info["loop_plan_info"]
         self.loop_action_info = loop_info["loop_action_info"]
-        self.loop_post_processor_info = loop_info["loop_post_processor_info"]


@@ -17,9 +17,8 @@ from src.chat.focus_chat.info_processors.working_memory_processor import Working
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
-from src.chat.heart_flow.observation.structure_observation import StructureObservation
 from src.chat.heart_flow.observation.actions_observation import ActionObservation
-from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
 from src.chat.focus_chat.memory_activator import MemoryActivator
 from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
 from src.chat.focus_chat.planners.planner_factory import PlannerFactory
@@ -28,21 +27,18 @@ from src.chat.focus_chat.planners.action_manager import ActionManager
 from src.config.config import global_config
 from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
 from src.chat.focus_chat.hfc_version_manager import get_hfc_version
-from src.chat.focus_chat.info.structured_info import StructuredInfo
 from src.person_info.relationship_builder_manager import relationship_builder_manager

 install(extra_lines=3)

-# Timeout constants
-ACTION_MODIFICATION_TIMEOUT = 15.0  # timeout for the action-modification task (seconds)
+# Note: the old action-modification timeout constant was removed, since execution is now sequential

 # Observer mapping: key is the observer name, value is (observer class, init parameter)
 OBSERVATION_CLASSES = {
     "ChattingObservation": (ChattingObservation, "chat_id"),
     "WorkingMemoryObservation": (WorkingMemoryObservation, "observe_id"),
     "HFCloopObservation": (HFCloopObservation, "observe_id"),
-    "StructureObservation": (StructureObservation, "observe_id"),
 }

 # Processor mapping: key is the processor name, value is (processor class, optional config key)
@@ -51,11 +47,6 @@ PROCESSOR_CLASSES = {
     "WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"),
 }

-# Post-planning processor mapping: processors that run after planning and before action execution
-POST_PLANNING_PROCESSOR_CLASSES = {
-    "ToolProcessor": (ToolProcessor, "tool_use_processor"),
-}
-
 logger = get_logger("hfc")  # Logger Name Changed
@@ -120,31 +111,13 @@ class HeartFChatting:
         self._register_observations()

         # Decide which processors to enable from the config file and default rules
-        config_processor_settings = global_config.focus_chat_processor
-        self.enabled_processor_names = []
-
-        for proc_name, (_proc_class, config_key) in PROCESSOR_CLASSES.items():
-            # Check whether the processor should be enabled
-            if not config_key or getattr(config_processor_settings, config_key, True):
-                self.enabled_processor_names.append(proc_name)
-
-        # Initialize the post-planning processors (run after planning)
-        self.enabled_post_planning_processor_names = []
-        for proc_name, (_proc_class, config_key) in POST_PLANNING_PROCESSOR_CLASSES.items():
-            # For relationship-related processors, also check the relationship config option
-            if not config_key or getattr(config_processor_settings, config_key, True):
-                self.enabled_post_planning_processor_names.append(proc_name)
-
-        # logger.info(f"{self.log_prefix} processors to enable: {self.enabled_processor_names}")
-        # logger.info(f"{self.log_prefix} post-planning processors to enable: {self.enabled_post_planning_processor_names}")
+        self.enabled_processor_names = ["ChattingInfoProcessor"]
+        if global_config.focus_chat.working_memory_processor:
+            self.enabled_processor_names.append("WorkingMemoryProcessor")

         self.processors: List[BaseProcessor] = []
         self._register_default_processors()

-        # Initialize the post-planning processors
-        self.post_planning_processors: List[BaseProcessor] = []
-        self._register_post_planning_processors()
-
         self.action_manager = ActionManager()
         self.action_planner = PlannerFactory.create_planner(
             log_prefix=self.log_prefix, action_manager=self.action_manager
@@ -186,7 +159,7 @@ class HeartFChatting:
             # Check whether WorkingMemoryObservation needs to be skipped
             if name == "WorkingMemoryObservation":
                 # Skip WorkingMemoryObservation when the working-memory processor is disabled
-                if not global_config.focus_chat_processor.working_memory_processor:
+                if not global_config.focus_chat.working_memory_processor:
                     logger.debug(f"{self.log_prefix} working-memory processor disabled, skipping observer {name}")
                     continue
@@ -211,16 +184,12 @@ class HeartFChatting:
             processor_info = PROCESSOR_CLASSES.get(name)  # processor_info is (ProcessorClass, config_key)
             if processor_info:
                 processor_actual_class = processor_info[0]  # the actual class definition
-                # Decide from the processor class name whether subheartflow_id is needed
-                if name in [
-                    "WorkingMemoryProcessor",
-                ]:
-                    self.processors.append(processor_actual_class(subheartflow_id=self.stream_id))
-                elif name == "ChattingInfoProcessor":
+                # Decide the constructor arguments from the processor class name
+                if name == "ChattingInfoProcessor":
                     self.processors.append(processor_actual_class())
+                elif name == "WorkingMemoryProcessor":
+                    self.processors.append(processor_actual_class(subheartflow_id=self.stream_id))
                 else:
-                    # For processors defined in PROCESSOR_CLASSES whose construction is not explicitly handled here
-                    # (e.g. a newly added processor that needs no id and is not ChattingInfoProcessor)
                     try:
                         self.processors.append(processor_actual_class())  # try no-arg construction
                         logger.debug(f"{self.log_prefix} registered processor {name} (no-arg construction).")
@@ -229,7 +198,6 @@ class HeartFChatting:
                             f"{self.log_prefix} processor {name} failed to construct; it may need arguments (such as subheartflow_id) not handled in the registration logic."
                         )
             else:
-                # This should not happen in theory, since enabled_processor_names is generated from the keys of PROCESSOR_CLASSES
                 logger.warning(
                     f"{self.log_prefix} no processor named '{name}' defined in PROCESSOR_CLASSES; skipping registration."
                 )
@@ -239,47 +207,6 @@ class HeartFChatting:
         else:
             logger.warning(f"{self.log_prefix} no processors registered; this may be a config error, or all processors are disabled.")
-    def _register_post_planning_processors(self):
-        """Register post-planning processors from self.enabled_post_planning_processor_names"""
-        self.post_planning_processors = []  # clear any existing ones
-        for name in self.enabled_post_planning_processor_names:  # 'name' is "PersonImpressionpProcessor", etc.
-            processor_info = POST_PLANNING_PROCESSOR_CLASSES.get(name)  # processor_info is (ProcessorClass, config_key)
-            if processor_info:
-                processor_actual_class = processor_info[0]  # the actual class definition
-                # Decide from the processor class name whether subheartflow_id is needed
-                if name in [
-                    "ToolProcessor",
-                    "RelationshipBuildProcessor",
-                    "RealTimeInfoProcessor",
-                    "ExpressionSelectorProcessor",
-                ]:
-                    self.post_planning_processors.append(processor_actual_class(subheartflow_id=self.stream_id))
-                else:
-                    # For processors defined in POST_PLANNING_PROCESSOR_CLASSES whose construction is not explicitly handled here
-                    # (e.g. a newly added processor that needs no id and is not PersonImpressionpProcessor)
-                    try:
-                        self.post_planning_processors.append(processor_actual_class())  # try no-arg construction
-                        logger.debug(f"{self.log_prefix} registered post-planning processor {name} (no-arg construction).")
-                    except TypeError:
-                        logger.error(
-                            f"{self.log_prefix} post-planning processor {name} failed to construct; it may need arguments (such as subheartflow_id) not handled in the registration logic."
-                        )
-            else:
-                # This should not happen in theory, since enabled_post_planning_processor_names is generated from the keys of POST_PLANNING_PROCESSOR_CLASSES
-                logger.warning(
-                    f"{self.log_prefix} no processor named '{name}' defined in POST_PLANNING_PROCESSOR_CLASSES; skipping registration."
-                )
-
-        if self.post_planning_processors:
-            logger.info(
-                f"{self.log_prefix} registered post-planning processors: {[p.__class__.__name__ for p in self.post_planning_processors]}"
-            )
-        else:
-            logger.warning(
-                f"{self.log_prefix} no post-planning processors registered; this may be a config error, or they are all disabled."
-            )
-
     async def start(self):
         """Check whether the main loop needs to start, and start it if it is not active."""
         logger.debug(f"{self.log_prefix} starting HeartFChatting")
@@ -460,27 +387,12 @@ class HeartFChatting:
                 ("\nPre-processor time: " + "; ".join(processor_time_strings)) if processor_time_strings else ""
             )

-            # New: log each post-processor's time cost
-            post_processor_time_costs = self._current_cycle_detail.loop_post_processor_info.get(
-                "post_processor_time_costs", {}
-            )
-            post_processor_time_strings = []
-            for pname, ptime in post_processor_time_costs.items():
-                formatted_ptime = f"{ptime * 1000:.2f} ms" if ptime < 1 else f"{ptime:.2f} s"
-                post_processor_time_strings.append(f"{pname}: {formatted_ptime}")
-            post_processor_time_log = (
-                ("\nPost-processor time: " + "; ".join(post_processor_time_strings))
-                if post_processor_time_strings
-                else ""
-            )
-
             logger.info(
                 f"{self.log_prefix} thought cycle #{self._current_cycle_detail.cycle_id}, "
                 f"took {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}s, "
                 f"action: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', 'unknown action')}"
                 + (f"\ndetails: {'; '.join(timer_strings)}" if timer_strings else "")
                 + processor_time_log
-                + post_processor_time_log
             )

             # Record performance data
@@ -491,8 +403,7 @@ class HeartFChatting:
                     "action_type": action_result.get("action_type", "unknown"),
                     "total_time": self._current_cycle_detail.end_time - self._current_cycle_detail.start_time,
                     "step_times": cycle_timers.copy(),
                     "processor_time_costs": processor_time_costs,  # pre-processor time
-                    "post_processor_time_costs": post_processor_time_costs,  # post-processor time
                     "reasoning": action_result.get("reasoning", ""),
                     "success": self._current_cycle_detail.loop_action_info.get("action_taken", False),
                 }
@@ -634,123 +545,6 @@ class HeartFChatting:
         return all_plan_info, processor_time_costs

-    async def _process_post_planning_processors_with_timing(
-        self, observations: List[Observation], action_type: str, action_data: dict
-    ) -> tuple[dict, dict]:
-        """
-        Run the post-planning processors (those executed after planning) and collect detailed timing stats
-        Includes: relationship processor, expression selector, memory activator
-
-        Args:
-            observations: list of observers
-            action_type: action type
-            action_data: the original action data
-
-        Returns:
-            tuple[dict, dict]: (updated action data, post-processor timing stats)
-        """
-        logger.info(f"{self.log_prefix} running post-planning processors (with detailed stats)")
-
-        # Create all post-planning tasks
-        task_list = []
-        task_to_name_map = {}
-        task_start_times = {}
-        post_processor_time_costs = {}
-
-        # Add the post-planning processor tasks
-        for processor in self.post_planning_processors:
-            processor_name = processor.__class__.__name__
-
-            async def run_processor_with_timeout_and_timing(proc=processor, name=processor_name):
-                start_time = time.time()
-                try:
-                    result = await asyncio.wait_for(
-                        proc.process_info(observations=observations, action_type=action_type, action_data=action_data),
-                        30,
-                    )
-                    end_time = time.time()
-                    post_processor_time_costs[name] = end_time - start_time
-                    logger.debug(f"{self.log_prefix} post-planning processor {name} took {end_time - start_time:.3f}s")
-                    return result
-                except Exception as e:
-                    end_time = time.time()
-                    post_processor_time_costs[name] = end_time - start_time
-                    logger.warning(f"{self.log_prefix} post-planning processor {name} raised; took {end_time - start_time:.3f}s")
-                    raise e
-
-            task = asyncio.create_task(run_processor_with_timeout_and_timing())
-            task_list.append(task)
-            task_to_name_map[task] = ("processor", processor_name)
-            task_start_times[task] = time.time()
-            logger.info(f"{self.log_prefix} started post-planning processor task: {processor_name}")
-
-        # If there are no post-planning tasks, return immediately
-        if not task_list:
-            logger.info(f"{self.log_prefix} no enabled post-planning processors or memory activator")
-            return action_data, {}
-
-        # Wait for all tasks to finish
-        pending_tasks = set(task_list)
-        all_post_plan_info = []
-
-        while pending_tasks:
-            done, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED)
-
-            for task in done:
-                task_type, task_name = task_to_name_map[task]
-                try:
-                    result = await task
-                    if task_type == "processor":
-                        logger.info(f"{self.log_prefix} post-planning processor {task_name} finished!")
-                        if result is not None:
-                            all_post_plan_info.extend(result)
-                        else:
-                            logger.warning(f"{self.log_prefix} post-planning processor {task_name} returned None")
-                except asyncio.TimeoutError:
-                    # For timed-out tasks, record the elapsed time
-                    elapsed_time = time.time() - task_start_times[task]
-                    if task_type == "processor":
-                        post_processor_time_costs[task_name] = elapsed_time
-                        logger.warning(
-                            f"{self.log_prefix} post-planning processor {task_name} timed out (>30s), skipped; took {elapsed_time:.3f}s"
-                        )
-                except Exception as e:
-                    # For failed tasks, record the elapsed time
-                    elapsed_time = time.time() - task_start_times[task]
-                    if task_type == "processor":
-                        post_processor_time_costs[task_name] = elapsed_time
-                        logger.error(
-                            f"{self.log_prefix} post-planning processor {task_name} failed; took {elapsed_time:.3f}s. Error: {e}",
-                            exc_info=True,
-                        )
-
-        # Merge the post-planning processors' results into action_data
-        updated_action_data = action_data.copy()
-        structured_info = ""
-        for info in all_post_plan_info:
-            if isinstance(info, StructuredInfo):
-                structured_info = info.get_processed_info()
-
-        if structured_info:
-            updated_action_data["structured_info"] = structured_info
-
-        if all_post_plan_info:
-            logger.info(f"{self.log_prefix} post-planning processing done, produced {len(all_post_plan_info)} info items")
-
-        # Emit detailed stats
-        if post_processor_time_costs:
-            stats_str = ", ".join(
-                [f"{name}: {time_cost:.3f}s" for name, time_cost in post_processor_time_costs.items()]
-            )
-            logger.info(f"{self.log_prefix} post-planning processor timing: {stats_str}")
-
-        return updated_action_data, post_processor_time_costs
-
     async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
         try:
             loop_start_time = time.time()
@@ -765,10 +559,10 @@ class HeartFChatting:
             await self.relationship_builder.build_relation()

-            # Run action adjustment, recall, and the processor stage in parallel
-            with Timer("Adjust actions & process", cycle_timers):
-                # Create the parallel tasks
-                async def modify_actions_task():
+            # Run action adjustment and the processor stage sequentially
+            # Step 1: action modification
+            with Timer("Action modification", cycle_timers):
+                try:
                     # Run the full action-modification flow
                     await self.action_modifier.modify_actions(
                         observations=self.observations,
@@ -776,44 +570,17 @@ class HeartFChatting:
                     await self.action_observation.observe()
                     self.observations.append(self.action_observation)
-                    return True
-
-                # Create two parallel tasks, adding a timeout guard for the LLM call
-                action_modify_task = asyncio.create_task(
-                    asyncio.wait_for(modify_actions_task(), timeout=ACTION_MODIFICATION_TIMEOUT)
-                )
-                processor_task = asyncio.create_task(self._process_processors(self.observations))
-
-                # Wait for both tasks, with timeout protection and detailed error handling
-                action_modify_result = None
-                all_plan_info = []
-                processor_time_costs = {}
-
-                try:
-                    action_modify_result, (all_plan_info, processor_time_costs) = await asyncio.gather(
-                        action_modify_task, processor_task, return_exceptions=True
-                    )
-
-                    # Check each task's result
-                    if isinstance(action_modify_result, Exception):
-                        if isinstance(action_modify_result, asyncio.TimeoutError):
-                            logger.error(f"{self.log_prefix} action-modification task timed out")
-                        else:
-                            logger.error(f"{self.log_prefix} action-modification task failed: {action_modify_result}")
-
-                    processor_result = (all_plan_info, processor_time_costs)
-                    if isinstance(processor_result, Exception):
-                        if isinstance(processor_result, asyncio.TimeoutError):
-                            logger.error(f"{self.log_prefix} processor task timed out")
-                        else:
-                            logger.error(f"{self.log_prefix} processor task failed: {processor_result}")
-                        all_plan_info = []
-                        processor_time_costs = {}
-                    else:
-                        all_plan_info, processor_time_costs = processor_result
-
-                except Exception as e:
-                    logger.error(f"{self.log_prefix} parallel gather failed: {e}")
+                    logger.debug(f"{self.log_prefix} action modification done")
+                except Exception as e:
+                    logger.error(f"{self.log_prefix} action modification failed: {e}")
+                    # Keep going; do not abort the flow
+
+            # Step 2: info processors
+            with Timer("Info processors", cycle_timers):
+                try:
+                    all_plan_info, processor_time_costs = await self._process_processors(self.observations)
+                except Exception as e:
+                    logger.error(f"{self.log_prefix} info processors failed: {e}")
                     # Set defaults so execution can continue
                     all_plan_info = []
                     processor_time_costs = {}
@@ -833,7 +600,6 @@ class HeartFChatting:
                 "observed_messages": plan_result.get("observed_messages", ""),
             }

-            # Fix: split the post-planning processors out of the action-execution Timer
             action_type, action_data, reasoning = (
                 plan_result.get("action_result", {}).get("action_type", "error"),
                 plan_result.get("action_result", {}).get("action_data", {}),
@@ -849,22 +615,7 @@ class HeartFChatting:
                 logger.debug(f"{self.log_prefix} MaiMai wants to: '{action_str}'")

-            # Added: time the post-planning processors separately and collect detailed stats
-            post_processor_time_costs = {}
-            if action_type != "no_reply":
-                with Timer("Post-planning processors", cycle_timers):
-                    logger.debug(f"{self.log_prefix} running post-planning processors (action type: {action_type})")
-                    # Record detailed post-processor timing
-                    post_start_time = time.time()
-                    action_data, post_processor_time_costs = await self._process_post_planning_processors_with_timing(
-                        self.observations, action_type, action_data
-                    )
-                    post_end_time = time.time()
-                    logger.info(f"{self.log_prefix} post-planning processors took {post_end_time - post_start_time:.3f}s in total")
-            else:
-                logger.debug(f"{self.log_prefix} skipping post-planning processors (action type: {action_type})")
-
-            # Fix: time pure action execution
+            # Time the action execution
             with Timer("Action execution", cycle_timers):
                 success, reply_text, command = await self._handle_action(
                     action_type, reasoning, action_data, cycle_timers, thinking_id
@@ -877,17 +628,11 @@ class HeartFChatting:
                 "taken_time": time.time(),
             }

-            # Add the post-processor stats to loop_info
-            loop_post_processor_info = {
-                "post_processor_time_costs": post_processor_time_costs,
-            }
-
             loop_info = {
                 "loop_observation_info": loop_observation_info,
                 "loop_processor_info": loop_processor_info,
                 "loop_plan_info": loop_plan_info,
                 "loop_action_info": loop_action_info,
-                "loop_post_processor_info": loop_post_processor_info,  # new
             }

             return loop_info


@@ -3,16 +3,14 @@ from src.config.config import global_config
 from src.chat.message_receive.message import MessageRecv
 from src.chat.message_receive.storage import MessageStorage
 from src.chat.heart_flow.heartflow import heartflow
-from src.chat.message_receive.chat_stream import get_chat_manager, ChatStream
+from src.chat.message_receive.chat_stream import get_chat_manager
 from src.chat.utils.utils import is_mentioned_bot_in_message
 from src.chat.utils.timer_calculator import Timer
 from src.common.logger import get_logger
-import math
 import re
+import math
 import traceback
 from typing import Optional, Tuple
-from maim_message import UserInfo
 from src.person_info.relationship_manager import get_relationship_manager
@@ -90,46 +88,6 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
     return interested_rate, is_mentioned

-def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
-    """Check whether the message contains a banned word
-
-    Args:
-        text: the text to check
-        chat: the chat object
-        userinfo: user info
-
-    Returns:
-        bool: whether a banned word is present
-    """
-    for word in global_config.message_receive.ban_words:
-        if word in text:
-            chat_name = chat.group_info.group_name if chat.group_info else "private chat"
-            logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
-            logger.info(f"[banned-word filter] message contains {word}, filtered")
-            return True
-    return False
-
-def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
-    """Check whether the message matches a banned regex
-
-    Args:
-        text: the text to check
-        chat: the chat object
-        userinfo: user info
-
-    Returns:
-        bool: whether a banned regex matched
-    """
-    for pattern in global_config.message_receive.ban_msgs_regex:
-        if re.search(pattern, text):
-            chat_name = chat.group_info.group_name if chat.group_info else "private chat"
-            logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
-            logger.info(f"[regex filter] message matched {pattern}, filtered")
-            return True
-    return False

 class HeartFCMessageReceiver:
     """Heart-flow processor: handles received messages and computes interest"""
@@ -167,12 +125,6 @@ class HeartFCMessageReceiver:
             subheartflow = await heartflow.get_or_create_subheartflow(chat.stream_id)
             message.update_chat_stream(chat)

-            # 3. Filter checks
-            if _check_ban_words(message.processed_plain_text, chat, userinfo) or _check_ban_regex(
-                message.raw_message, chat, userinfo
-            ):
-                return
-
             # 6. Interest calculation and update
             interested_rate, is_mentioned = await _calculate_interest(message)
             subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)
@@ -183,7 +135,6 @@ class HeartFCMessageReceiver:
             current_talk_frequency = global_config.chat.get_current_talk_frequency(chat.stream_id)

             # If the message contains a picture marker, show it as a picture in the log
-            import re
             picid_match = re.search(r"\[picid:([^\]]+)\]", message.processed_plain_text)
             if picid_match:
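As an aside, the picture-marker regex used above behaves like this (the input string is illustrative):

import re

text = "look at this [picid:abc123]"
picid_match = re.search(r"\[picid:([^\]]+)\]", text)
if picid_match:
    print(picid_match.group(1))  # -> abc123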

src/chat/focus_chat/hfc_performance_logger.py

@@ -42,7 +42,6 @@ class HFCPerformanceLogger:
                 "total_time": cycle_data.get("total_time", 0),
                 "step_times": cycle_data.get("step_times", {}),
                 "processor_time_costs": cycle_data.get("processor_time_costs", {}),  # pre-processor time
-                "post_processor_time_costs": cycle_data.get("post_processor_time_costs", {}),  # post-processor time
                 "reasoning": cycle_data.get("reasoning", ""),
                 "success": cycle_data.get("success", False),
             }
@@ -60,13 +59,6 @@ class HFCPerformanceLogger:
                 f"time={record['total_time']:.2f}s",
             ]

-            # Add the post-processor timing info to the log
-            if record["post_processor_time_costs"]:
-                post_processor_stats = ", ".join(
-                    [f"{name}: {time_cost:.3f}s" for name, time_cost in record["post_processor_time_costs"].items()]
-                )
-                log_parts.append(f"post_processors=({post_processor_stats})")
-
             logger.debug(f"Recorded HFC cycle data: {', '.join(log_parts)}")

         except Exception as e:

src/chat/focus_chat/hfc_version_manager.py

@@ -20,7 +20,7 @@ class HFCVersionManager:
     """HFC version manager"""

     # Default version
-    DEFAULT_VERSION = "v4.0.0"
+    DEFAULT_VERSION = "v5.0.0"

     # Current runtime version
     _current_version: Optional[str] = None

Deleted file: ExpressionSelectionInfo

@@ -1,71 +0,0 @@
from dataclasses import dataclass
from typing import List, Dict

from .info_base import InfoBase


@dataclass
class ExpressionSelectionInfo(InfoBase):
    """Expression-selection info class

    Stores and manages the selected expressions.

    Attributes:
        type (str): type identifier, defaults to "expression_selection"
        data (Dict[str, Any]): dict holding the selected expressions
    """

    type: str = "expression_selection"

    def get_selected_expressions(self) -> List[Dict[str, str]]:
        """Get the list of selected expressions

        Returns:
            List[Dict[str, str]]: the selected expressions
        """
        return self.get_info("selected_expressions") or []

    def set_selected_expressions(self, expressions: List[Dict[str, str]]) -> None:
        """Set the list of selected expressions

        Args:
            expressions: the selected expressions
        """
        self.data["selected_expressions"] = expressions

    def get_expressions_count(self) -> int:
        """Get the number of selected expressions

        Returns:
            int: the number of expressions
        """
        return len(self.get_selected_expressions())

    def get_processed_info(self) -> str:
        """Get the processed info

        Returns:
            str: the processed info string
        """
        expressions = self.get_selected_expressions()
        if not expressions:
            return ""

        # Format the expressions as readable text
        formatted_expressions = []
        for expr in expressions:
            situation = expr.get("situation", "")
            style = expr.get("style", "")
            expr.get("type", "")
            if situation and style:
                formatted_expressions.append(f"When {situation}, use {style}")

        return "\n".join(formatted_expressions)

    def get_expressions_for_action_data(self) -> List[Dict[str, str]]:
        """Get the expression data for action_data

        Returns:
            List[Dict[str, str]]: the formatted expression data
        """
        return self.get_selected_expressions()

Deleted file: MindInfo

@@ -1,34 +0,0 @@
from typing import Dict, Any
from dataclasses import dataclass, field
from .info_base import InfoBase


@dataclass
class MindInfo(InfoBase):
    """Mind info class

    Stores and manages the current mind state.

    Attributes:
        type (str): type identifier, defaults to "mind"
        data (Dict[str, Any]): dict containing current_mind
    """

    type: str = "mind"
    data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""})

    def get_current_mind(self) -> str:
        """Get the current mind state

        Returns:
            str: the current mind state
        """
        return self.get_info("current_mind") or ""

    def set_current_mind(self, mind: str) -> None:
        """Set the current mind state

        Args:
            mind: the mind state to set
        """
        self.data["current_mind"] = mind

Deleted file: RelationInfo

@@ -1,40 +0,0 @@
from dataclasses import dataclass
from .info_base import InfoBase


@dataclass
class RelationInfo(InfoBase):
    """Relation info class

    Stores and manages the current relation state.

    Attributes:
        type (str): type identifier, defaults to "relation"
        data (Dict[str, Any]): dict containing current_relation
    """

    type: str = "relation"

    def get_relation_info(self) -> str:
        """Get the current relation state

        Returns:
            str: the current relation state
        """
        return self.get_info("relation_info") or ""

    def set_relation_info(self, relation_info: str) -> None:
        """Set the current relation state

        Args:
            relation_info: the relation state to set
        """
        self.data["relation_info"] = relation_info

    def get_processed_info(self) -> str:
        """Get the processed info

        Returns:
            str: the processed info
        """
        return self.get_relation_info() or ""

src/chat/focus_chat/info/structured_info.py (deleted)

@@ -1,85 +0,0 @@
from typing import Dict, Optional, Any, List
from dataclasses import dataclass, field


@dataclass
class StructuredInfo:
    """Info base class

    A base info class for storing and managing various kinds of info data.
    Concrete info classes should inherit from it.

    Attributes:
        type (str): type identifier, defaults to "base"
        data (Dict[str, Union[str, Dict, list]]): dict holding the actual info data;
            supports nested structures such as strings, dicts, and lists
    """

    type: str = "structured_info"
    data: Dict[str, Any] = field(default_factory=dict)

    def get_type(self) -> str:
        """Get the info type

        Returns:
            str: the type identifier of this info object
        """
        return self.type

    def get_data(self) -> Dict[str, Any]:
        """Get all info data

        Returns:
            Dict[str, Any]: dict with all the info data
        """
        return self.data

    def get_info(self, key: str) -> Optional[Any]:
        """Get the info for a specific key

        Args:
            key: the key to look up

        Returns:
            Optional[Any]: the value, or None if the key does not exist
        """
        return self.data.get(key)

    def get_info_list(self, key: str) -> List[Any]:
        """Get the info list for a specific key

        Args:
            key: the key to look up

        Returns:
            List[Any]: the value list, or an empty list if the key does not exist
        """
        value = self.data.get(key)
        if isinstance(value, list):
            return value
        return []

    def set_info(self, key: str, value: Any) -> None:
        """Set the info value for a specific key

        Args:
            key: the key to set
            value: the value to set
        """
        self.data[key] = value

    def get_processed_info(self) -> str:
        """Get the processed info

        Returns:
            str: the processed info string
        """
        info_str = ""
        # print(f"self.data: {self.data}")
        for key, value in self.data.items():
            # print(f"key: {key}, value: {value}")
            info_str += f"Info type: {key}, content: {value}\n"
        return info_str

src/chat/focus_chat/info_processors/tool_processor.py (deleted)

@@ -1,186 +0,0 @@
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import time
from src.common.logger import get_logger
from src.individuality.individuality import get_individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from .base_processor import BaseProcessor
from typing import List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.info.structured_info import StructuredInfo
from src.chat.heart_flow.observation.structure_observation import StructureObservation

logger = get_logger("processor")


def init_prompt():
    # ... original code ...

    # Add the tool-executor prompt
    tool_executor_prompt = """
    You are an assistant dedicated to executing tools. Your name is {bot_name}. It is now {time_now}.

    The chat currently going on in the group:
    {chat_observe_info}

    Analyze the chat carefully and consider the following:
    1. Does the content contain questions that require looking up information?
    2. Are there explicit instructions to use a tool?

    If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed".
    """
    Prompt(tool_executor_prompt, "tool_executor_prompt")


class ToolProcessor(BaseProcessor):
    log_prefix = "Tool executor"

    def __init__(self, subheartflow_id: str):
        super().__init__()

        self.subheartflow_id = subheartflow_id
        self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
        self.llm_model = LLMRequest(
            model=global_config.model.focus_tool_use,
            request_type="focus.processor.tool",
        )
        self.structured_info = []

    async def process_info(
        self,
        observations: List[Observation] = None,
        action_type: str = None,
        action_data: dict = None,
        **kwargs,
    ) -> List[StructuredInfo]:
        """Process the info objects

        Args:
            observations: optional observation list, containing ChattingObservation and StructureObservation instances
            action_type: action type
            action_data: action data
            **kwargs: other optional arguments

        Returns:
            list: the processed structured-info list
        """
        working_infos = []
        result = []

        if observations:
            for observation in observations:
                if isinstance(observation, ChattingObservation):
                    result, used_tools, prompt = await self.execute_tools(observation)
                    logger.info(f"Tool call result: {result}")

            # Update the structured info held by the WorkingObservation
            for observation in observations:
                if isinstance(observation, StructureObservation):
                    for structured_info in result:
                        # logger.debug(f"{self.log_prefix} updating structured info in WorkingObservation: {structured_info}")
                        observation.add_structured_info(structured_info)

                    working_infos = observation.get_observe_info()
                    logger.debug(f"{self.log_prefix} structured info in WorkingObservation after update: {working_infos}")

        structured_info = StructuredInfo()
        if working_infos:
            for working_info in working_infos:
                structured_info.set_info(key=working_info.get("type"), value=working_info.get("content"))

        return [structured_info]

    async def execute_tools(self, observation: ChattingObservation, action_type: str = None, action_data: dict = None):
        """
        Execute tools in parallel and return structured info

        Args:
            sub_mind: the sub-mind object
            chat_target_name: name of the chat target, defaults to "the other party"
            is_group_chat: whether this is a group chat, defaults to False
            return_details: whether to return details, defaults to False
            cycle_info: cycle-info object that can record detailed execution info
            action_type: action type
            action_data: action data

        Returns:
            If return_details is False:
                List[Dict]: structured info from the tool executions
            If return_details is True:
                Tuple[List[Dict], List[str], str]: (tool results, tools used, tool-execution prompt)
        """
        tool_instance = ToolUser()
        tools = tool_instance._define_tools()
        # logger.debug(f"observation: {observation}")
        # logger.debug(f"observation.chat_target_info: {observation.chat_target_info}")
        # logger.debug(f"observation.is_group_chat: {observation.is_group_chat}")
        # logger.debug(f"observation.person_list: {observation.person_list}")

        is_group_chat = observation.is_group_chat
        # chat_observe_info = observation.get_observe_info()
        chat_observe_info = observation.talking_message_str_truncate_short
        # person_list = observation.person_list

        # Get the time info
        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        # Build the prompt dedicated to tool calling
        prompt = await global_prompt_manager.format_prompt(
            "tool_executor_prompt",
            chat_observe_info=chat_observe_info,
            is_group_chat=is_group_chat,
            bot_name=get_individuality().name,
            time_now=time_now,
        )

        # Call the LLM, focused on tool use
        # logger.info(f"Starting tool call: {prompt}")
        response, other_info = await self.llm_model.generate_response_async(prompt=prompt, tools=tools)

        if len(other_info) == 3:
            reasoning_content, model_name, tool_calls = other_info
        else:
            reasoning_content, model_name = other_info
            tool_calls = None

        # print("tooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltool")
        if tool_calls:
            logger.info(f"Raw tool output:\n{tool_calls}")

        # Process tool calls and collect results, similar to the logic in SubMind
        new_structured_items = []
        used_tools = []  # record which tools were used

        if tool_calls:
            success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
            if success and valid_tool_calls:
                for tool_call in valid_tool_calls:
                    try:
                        # Record the name of the tool that was used
                        tool_name = tool_call.get("name", "unknown_tool")
                        used_tools.append(tool_name)

                        result = await tool_instance._execute_tool_call(tool_call)
                        name = result.get("type", "unknown_type")
                        content = result.get("content", "")
                        logger.info(f"Tool {name} returned: {content}")

                        if result:
                            new_item = {
                                "type": result.get("type", "unknown_type"),
                                "id": result.get("id", f"tool_exec_{time.time()}"),
                                "content": result.get("content", ""),
                                "ttl": 3,
                            }
                            new_structured_items.append(new_item)
                    except Exception as e:
                        logger.error(f"{self.log_prefix} tool execution failed: {e}")

        return new_structured_items, used_tools, prompt


init_prompt()

src/chat/focus_chat/memory_activator.py

@@ -46,9 +46,12 @@ def init_prompt():
     # --- Group Chat Prompt ---
     memory_activator_prompt = """
     You are a memory analyzer; you need to recall memories based on the information below
-    Below is information from a chat; summarize a few keywords from it to serve as memory-recall triggers
+    Below is a chat log; summarize a few keywords from it to serve as memory-recall triggers
+    Chat log:
     {obs_info_text}

+    The message you want to reply to:
+    {target_message}
+
     Historical keywords (avoid extracting these again):
     {cached_keywords}
@@ -69,12 +72,12 @@ class MemoryActivator:
         self.summary_model = LLMRequest(
             model=global_config.model.memory_summary,
             temperature=0.7,
-            request_type="focus.memory_activator",
+            request_type="memory_activator",
         )
         self.running_memory = []
         self.cached_keywords = set()  # cache of historical keywords

-    async def activate_memory_with_chat_history(self, chat_id, target_message, chat_history_prompt) -> List[Dict]:
+    async def activate_memory_with_chat_history(self, target_message, chat_history_prompt) -> List[Dict]:
         """
         Activate memories
@@ -88,23 +91,13 @@ class MemoryActivator:
         if not global_config.memory.enable_memory:
             return []

-        # obs_info_text = ""
-        # for observation in observations:
-        #     if isinstance(observation, ChattingObservation):
-        #         obs_info_text += observation.talking_message_str_truncate_short
-        #     elif isinstance(observation, StructureObservation):
-        #         working_info = observation.get_observe_info()
-        #         for working_info_item in working_info:
-        #             obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n"
-        # logger.info(f"obs_info_text to search memories against: {obs_info_text}")
-
        # Turn the cached keywords into a string for the prompt
         cached_keywords_str = ", ".join(self.cached_keywords) if self.cached_keywords else "no historical keywords yet"

         prompt = await global_prompt_manager.format_prompt(
             "memory_activator_prompt",
             obs_info_text=chat_history_prompt,
+            target_message=target_message,
             cached_keywords=cached_keywords_str,
         )
@@ -130,9 +123,6 @@ class MemoryActivator:
             related_memory = await hippocampus_manager.get_memory_from_topic(
                 valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
             )
-            # related_memory = await hippocampus_manager.get_memory_from_text(
-            #     text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=False
-            # )

             logger.info(f"Retrieved memories: {related_memory}")

src/chat/heart_flow/observation/chatting_observation.py

@@ -8,14 +8,9 @@ from src.chat.utils.chat_message_builder import (
     get_person_id_list,
 )
 from src.chat.utils.prompt_builder import global_prompt_manager, Prompt
-from typing import Optional
-import difflib
-from src.chat.message_receive.message import MessageRecv
 from src.chat.heart_flow.observation.observation import Observation
 from src.common.logger import get_logger
 from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
-from src.chat.message_receive.chat_stream import get_chat_manager
-from src.person_info.person_info import get_person_info_manager

 logger = get_logger("observation")
@@ -108,75 +103,6 @@ class ChattingObservation(Observation):
     def get_observe_info(self, ids=None):
         return self.talking_message_str

-    def get_recv_message_by_text(self, sender: str, text: str) -> Optional[MessageRecv]:
-        """
-        Given the plain text of a reply:
-        1. Find the newest best-matching message in talking_message
-        2. If found, return that message
-        """
-        find_msg = None
-        reverse_talking_message = list(reversed(self.talking_message))
-        for message in reverse_talking_message:
-            user_id = message["user_id"]
-            platform = message["platform"]
-            person_id = get_person_info_manager().get_person_id(platform, user_id)
-            person_name = get_person_info_manager().get_value(person_id, "person_name")
-            if person_name == sender:
-                similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
-                if similarity >= 0.9:
-                    find_msg = message
-                    break
-        if not find_msg:
-            return None
-
-        user_info = {
-            "platform": find_msg.get("user_platform", ""),
-            "user_id": find_msg.get("user_id", ""),
-            "user_nickname": find_msg.get("user_nickname", ""),
-            "user_cardname": find_msg.get("user_cardname", ""),
-        }
-
-        group_info = {}
-        if find_msg.get("chat_info_group_id"):
-            group_info = {
-                "platform": find_msg.get("chat_info_group_platform", ""),
-                "group_id": find_msg.get("chat_info_group_id", ""),
-                "group_name": find_msg.get("chat_info_group_name", ""),
-            }
-
-        content_format = ""
-        accept_format = ""
-        template_items = {}
-
-        format_info = {"content_format": content_format, "accept_format": accept_format}
-        template_info = {
-            "template_items": template_items,
-        }
-
-        message_info = {
-            "platform": self.platform,
-            "message_id": find_msg.get("message_id"),
-            "time": find_msg.get("time"),
-            "group_info": group_info,
-            "user_info": user_info,
-            "additional_config": find_msg.get("additional_config"),
-            "format_info": format_info,
-            "template_info": template_info,
-        }
-
-        message_dict = {
-            "message_info": message_info,
-            "raw_message": find_msg.get("processed_plain_text"),
-            "detailed_plain_text": find_msg.get("processed_plain_text"),
-            "processed_plain_text": find_msg.get("processed_plain_text"),
-        }
-
-        find_rec_msg = MessageRecv(message_dict)
-        find_rec_msg.update_chat_stream(get_chat_manager().get_or_create_stream(self.chat_id))
-        return find_rec_msg
-
     async def observe(self):
         # New messages since the last observation
         new_messages_list = get_raw_msg_by_timestamp_with_chat(

src/chat/heart_flow/observation/structure_observation.py (deleted)

@@ -1,42 +0,0 @@
from datetime import datetime
from src.common.logger import get_logger

# Import the new utility function

logger = get_logger("observation")


# Base class for all observations
class StructureObservation:
    def __init__(self, observe_id):
        self.observe_info = ""
        self.observe_id = observe_id
        self.last_observe_time = datetime.now().timestamp()  # initialized to the current time
        self.history_loop = []
        self.structured_info = []

    def to_dict(self) -> dict:
        """Convert the observation object into a serializable dict"""
        return {
            "observe_info": self.observe_info,
            "observe_id": self.observe_id,
            "last_observe_time": self.last_observe_time,
            "history_loop": self.history_loop,
            "structured_info": self.structured_info,
        }

    def get_observe_info(self):
        return self.structured_info

    def add_structured_info(self, structured_info: dict):
        self.structured_info.append(structured_info)

    async def observe(self):
        observed_structured_infos = []
        for structured_info in self.structured_info:
            if structured_info.get("ttl") > 0:
                structured_info["ttl"] -= 1
                observed_structured_infos.append(structured_info)
                logger.debug(f"Structured info still being observed: {structured_info}")

        self.structured_info = observed_structured_infos


@@ -62,7 +62,10 @@ class SubHeartflow:
         """Async init: create the interest flow and determine the chat type"""

         # Decide the initial state from the config
-        if global_config.chat.chat_mode == "focus":
+        if not self.is_group_chat:
+            logger.debug(f"{self.log_prefix} private chat detected; trying to enter the FOCUSED state directly.")
+            await self.change_chat_state(ChatState.FOCUSED)
+        elif global_config.chat.chat_mode == "focus":
             logger.debug(f"{self.log_prefix} configured for focus mode; trying to enter the FOCUSED state directly.")
             await self.change_chat_state(ChatState.FOCUSED)
         else:  # "auto" and other modes keep the original logic or default to NORMAL


@@ -91,16 +91,10 @@ class SubHeartflowManager:
             return subflow

         try:
-            # Initialize the subheartflow, passing in mai_state_info
             new_subflow = SubHeartflow(
                 subheartflow_id,
             )

-            # First create and add the chatting observer
-            # observation = ChattingObservation(chat_id=subheartflow_id)
-            # await observation.initialize()
-            # new_subflow.add_observation(observation)
-
             # Then run the async init; if SubHeartflow needs to start HeartFChatting, it can get the observation
             await new_subflow.initialize()


@@ -1,4 +1,5 @@
 import traceback
+import os
 from typing import Dict, Any
 from src.common.logger import get_logger
@@ -13,13 +14,65 @@
 from src.config.config import global_config
 from src.plugin_system.core.component_registry import component_registry  # 导入新插件系统
 from src.plugin_system.base.base_command import BaseCommand
+from src.mais4u.mais4u_chat.s4u_msg_processor import S4UMessageProcessor
+from maim_message import UserInfo
+from src.chat.message_receive.chat_stream import ChatStream
+import re

 # 定义日志配置
+# 获取项目根目录(假设本文件在src/chat/message_receive/下,根目录为上上上级目录)
+PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
+ENABLE_S4U_CHAT = os.path.isfile(os.path.join(PROJECT_ROOT, "s4u.s4u"))
+if ENABLE_S4U_CHAT:
+    print("""\nS4U私聊模式已开启\n!!!!!!!!!!!!!!!!!\n""")
+# 仅内部开启

 # 配置主程序日志格式
 logger = get_logger("chat")


+def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
+    """检查消息是否包含过滤词
+
+    Args:
+        text: 待检查的文本
+        chat: 聊天对象
+        userinfo: 用户信息
+
+    Returns:
+        bool: 是否包含过滤词
+    """
+    for word in global_config.message_receive.ban_words:
+        if word in text:
+            chat_name = chat.group_info.group_name if chat.group_info else "私聊"
+            logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
+            logger.info(f"[过滤词识别]消息中含有{word}filtered")
+            return True
+    return False
+
+
+def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
+    """检查消息是否匹配过滤正则表达式
+
+    Args:
+        text: 待检查的文本
+        chat: 聊天对象
+        userinfo: 用户信息
+
+    Returns:
+        bool: 是否匹配过滤正则
+    """
+    for pattern in global_config.message_receive.ban_msgs_regex:
+        if re.search(pattern, text):
+            chat_name = chat.group_info.group_name if chat.group_info else "私聊"
+            logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
+            logger.info(f"[正则表达式过滤]消息匹配到{pattern}filtered")
+            return True
+    return False


 class ChatBot:
     def __init__(self):
         self.bot = None  # bot 实例引用
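
The `ENABLE_S4U_CHAT` switch above is a plain marker-file feature flag: the mode turns on only when a file named `s4u.s4u` exists in the project root, so it can be toggled without touching config. A minimal reproduction of the pattern, with a hypothetical `my_feature.flag` file name:

```python
import os

# Resolve the project root relative to this module, then probe for the marker.
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
FEATURE_ENABLED = os.path.isfile(os.path.join(PROJECT_ROOT, "my_feature.flag"))  # hypothetical name

if FEATURE_ENABLED:
    print("feature on")  # flip by creating/deleting the file; no reload of config needed
```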
@@ -30,6 +83,7 @@ class ChatBot:
        # 创建初始化PFC管理器的任务会在_ensure_started时执行
        self.only_process_chat = MessageProcessor()
        self.pfc_manager = PFCManager.get_instance()
+       self.s4u_message_processor = S4UMessageProcessor()

    async def _ensure_started(self):
        """确保所有任务已启动"""
@@ -38,17 +92,6 @@
        self._started = True

-   async def _create_pfc_chat(self, message: MessageRecv):
-       try:
-           if global_config.experimental.pfc_chatting:
-               chat_id = str(message.chat_stream.stream_id)
-               private_name = str(message.message_info.user_info.user_nickname)
-               await self.pfc_manager.get_or_create_conversation(chat_id, private_name)
-       except Exception as e:
-           logger.error(f"创建PFC聊天失败: {e}")

    async def _process_commands_with_new_system(self, message: MessageRecv):
        # sourcery skip: use-named-expression
        """使用新插件系统处理命令"""
@@ -139,14 +182,20 @@
        get_chat_manager().register_message(message)

-       # 创建聊天流
        chat = await get_chat_manager().get_or_create_stream(
            platform=message.message_info.platform,
            user_info=user_info,
            group_info=group_info,
        )
        message.update_chat_stream(chat)

+       # 过滤检查
+       if _check_ban_words(message.processed_plain_text, chat, user_info) or _check_ban_regex(
+           message.raw_message, chat, user_info
+       ):
+           return

        # 处理消息内容,生成纯文本
        await message.process()
@@ -172,24 +221,12 @@
        template_group_name = None

        async def preprocess():
-           logger.debug("开始预处理消息...")
-           # 如果在私聊中
-           if group_info is None:
-               logger.debug("检测到私聊消息")
-               if global_config.experimental.pfc_chatting:
-                   logger.debug("进入PFC私聊处理流程")
-                   # 创建聊天流
-                   logger.debug(f"{user_info.user_id}创建/获取聊天流")
-                   await self.only_process_chat.process_message(message)
-                   await self._create_pfc_chat(message)
-                   # 禁止PFC进入普通的心流消息处理逻辑
-               else:
-                   logger.debug("进入普通心流私聊处理")
-                   await self.heartflow_message_receiver.process_message(message)
-           # 群聊默认进入心流消息处理逻辑
-           else:
-               logger.debug(f"检测到群聊消息群ID: {group_info.group_id}")
-               await self.heartflow_message_receiver.process_message(message)
+           if ENABLE_S4U_CHAT:
+               logger.info("进入S4U流程")
+               await self.s4u_message_processor.process_message(message)
+               return
+
+           await self.heartflow_message_receiver.process_message(message)

        if template_group_name:
            async with global_prompt_manager.async_message_scope(template_group_name):

View File

@@ -301,6 +301,7 @@ class MessageSending(MessageProcessBase):
        is_emoji: bool = False,
        thinking_start_time: float = 0,
        apply_set_reply_logic: bool = False,
+       reply_to: str = None,
    ):
        # 调用父类初始化
        super().__init__(
@@ -319,6 +320,8 @@
        self.is_emoji = is_emoji
        self.apply_set_reply_logic = apply_set_reply_logic
+       self.reply_to = reply_to

        # 用于显示发送内容与显示不一致的情况
        self.display_message = display_message

View File

@@ -35,9 +35,13 @@ class MessageStorage:
                filtered_display_message = re.sub(pattern, "", display_message, flags=re.DOTALL)
            else:
                filtered_display_message = ""
+           reply_to = message.reply_to
        else:
            filtered_display_message = ""
+           reply_to = ""

        chat_info_dict = chat_stream.to_dict()
        user_info_dict = message.message_info.user_info.to_dict()
@@ -54,6 +58,7 @@
            time=float(message.message_info.time),
            chat_id=chat_stream.stream_id,
            # Flattened chat_info
+           reply_to=reply_to,
            chat_info_stream_id=chat_info_dict.get("stream_id"),
            chat_info_platform=chat_info_dict.get("platform"),
            chat_info_user_platform=user_info_from_chat.get("platform"),

View File

@@ -29,7 +29,6 @@ import traceback
from .normal_chat_generator import NormalChatGenerator
from src.chat.normal_chat.normal_chat_expressor import NormalChatExpressor
-from src.chat.replyer.default_generator import DefaultReplyer
from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
@@ -69,7 +68,6 @@ class NormalChat:
        # 初始化Normal Chat专用表达器
        self.expressor = NormalChatExpressor(self.chat_stream)
-       self.replyer = DefaultReplyer(self.chat_stream)

        # Interest dict
        self.interest_dict = interest_dict

View File

@@ -16,7 +16,7 @@ class NormalChatGenerator:
        model_config_1 = global_config.model.replyer_1.copy()
        model_config_2 = global_config.model.replyer_2.copy()
-       prob_first = global_config.normal_chat.normal_chat_first_probability
+       prob_first = global_config.chat.replyer_random_probability
        model_config_1["weight"] = prob_first
        model_config_2["weight"] = 1.0 - prob_first
@@ -42,15 +42,13 @@
        relation_info = await person_info_manager.get_value(person_id, "short_impression")
        reply_to_str = f"{person_name}:{message.processed_plain_text}"
-       structured_info = ""

        try:
            success, reply_set, prompt = await generator_api.generate_reply(
                chat_stream=message.chat_stream,
                reply_to=reply_to_str,
                relation_info=relation_info,
-               structured_info=structured_info,
                available_actions=available_actions,
+               enable_tool=global_config.tool.enable_in_normal_chat,
                model_configs=self.model_configs,
                request_type="normal.replyer",
                return_prompt=True,

View File

@@ -15,7 +15,6 @@ from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
-from src.chat.express.exprssion_learner import get_expression_learner
import time
import asyncio
from src.chat.express.expression_selector import expression_selector
@@ -28,6 +27,7 @@ from datetime import datetime
import re
from src.chat.knowledge.knowledge_lib import qa_manager
from src.chat.focus_chat.memory_activator import MemoryActivator
+from src.tools.tool_executor import ToolExecutor

logger = get_logger("replyer")

@@ -36,13 +36,14 @@ def init_prompt():
    Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
    Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
    Prompt("在群里聊天", "chat_target_group2")
    Prompt("{sender_name}", "chat_target_private2")
    Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")

    Prompt(
        """
{expression_habits_block}
-{structured_info_block}
+{tool_info_block}
+{knowledge_prompt}
{memory_block}
{relation_info_block}
{extra_info_block}
@@ -67,88 +68,52 @@ def init_prompt():
    Prompt(
        """
{expression_habits_block}
-{structured_info_block}
-{memory_block}
{relation_info_block}
-{extra_info_block}
-{chat_target}
{time_block}
+{chat_target}
{chat_info}
-现在"{sender_name}"说:{target_message}。你想要回复对方的这条消息。
{identity}
-你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。注意不要复读你说过的话。
-{config_expression_style}。回复不要浮夸,不要用夸张修辞,平淡一些。
-{keywords_reaction_prompt}
-请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。
-不要浮夸,不要夸张修辞,请注意不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出一条回复就好。
-现在,你说:
-""",
-        "default_generator_private_prompt",
-    )
-
-    Prompt(
-        """
-你可以参考你的以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
-{style_habbits}
-你现在正在群里聊天,以下是群里正在进行的聊天内容:
-{chat_info}
-以上是聊天内容,你需要了解聊天记录中的内容
-{chat_target}
-你的名字是{bot_name}{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
+你正在{chat_target_2},{reply_target_block}
+对这句话,你想表达,原句:{raw_reply},原因是:{reason}。你现在要思考怎么组织回复
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。请你修改你想表达的原句,符合你的表达风格和语言习惯
-请你根据情景使用以下句法:
-{grammar_habbits}
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
+{keywords_reaction_prompt}
+{moderation_prompt}
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
现在,你说:
""",
        "default_expressor_prompt",
    )
-
-    Prompt(
-        """
-你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
-{style_habbits}
-你现在正在群里聊天,以下是群里正在进行的聊天内容:
-{chat_info}
-以上是聊天内容,你需要了解聊天记录中的内容
-{chat_target}
-你的名字是{bot_name}{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
-你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
-请你根据情景使用以下句法:
-{grammar_habbits}
-{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
-不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
-现在,你说:
-""",
-        "default_expressor_private_prompt",  # New template for private FOCUSED chat
-    )


class DefaultReplyer:
    def __init__(
        self,
        chat_stream: ChatStream,
+       enable_tool: bool = False,
        model_configs: Optional[List[Dict[str, Any]]] = None,
        request_type: str = "focus.replyer",
    ):
        self.log_prefix = "replyer"
        self.request_type = request_type
+       self.enable_tool = enable_tool

        if model_configs:
            self.express_model_configs = model_configs
        else:
            # 当未提供配置时,使用默认配置并赋予默认权重
-           default_config = global_config.model.replyer_1.copy()
-           default_config.setdefault("weight", 1.0)
-           self.express_model_configs = [default_config]
+           model_config_1 = global_config.model.replyer_1.copy()
+           model_config_2 = global_config.model.replyer_2.copy()
+           prob_first = global_config.chat.replyer_random_probability
+
+           model_config_1["weight"] = prob_first
+           model_config_2["weight"] = 1.0 - prob_first
+
+           self.express_model_configs = [model_config_1, model_config_2]

        if not self.express_model_configs:
            logger.warning("未找到有效的模型配置,回复生成可能会失败。")
@@ -157,12 +122,13 @@
            fallback_config.setdefault("weight", 1.0)
            self.express_model_configs = [fallback_config]

-       self.heart_fc_sender = HeartFCSender()
-       self.memory_activator = MemoryActivator()
        self.chat_stream = chat_stream
        self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
+       self.heart_fc_sender = HeartFCSender()
+       self.memory_activator = MemoryActivator()
+       self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)

    def _select_weighted_model_config(self) -> Dict[str, Any]:
        """使用加权随机选择来挑选一个模型配置"""
        configs = self.express_model_configs
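
The two-entry weighting above feeds `_select_weighted_model_config`. A self-contained sketch of that weighted draw, using `random.choices` (one standard way to implement it; the real method may differ in details):

```python
import random
from typing import Any, Dict, List

def select_weighted(configs: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Each config carries a "weight"; missing weights default to 1.0.
    weights = [cfg.get("weight", 1.0) for cfg in configs]
    return random.choices(configs, weights=weights, k=1)[0]

# With replyer_random_probability = 0.5, both replyer models are drawn equally often.
configs = [{"name": "replyer_1", "weight": 0.5}, {"name": "replyer_2", "weight": 0.5}]
print(select_weighted(configs)["name"])
```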
@@ -205,7 +171,6 @@
        reply_data: Dict[str, Any] = None,
        reply_to: str = "",
        relation_info: str = "",
-       structured_info: str = "",
        extra_info: str = "",
        available_actions: List[str] = None,
    ) -> Tuple[bool, Optional[str]]:
@@ -222,7 +187,6 @@
            reply_data = {
                "reply_to": reply_to,
                "relation_info": relation_info,
-               "structured_info": structured_info,
                "extra_info": extra_info,
            }
            for key, value in reply_data.items():
@@ -246,7 +210,7 @@
        # 加权随机选择一个模型配置
        selected_model_config = self._select_weighted_model_config()
        logger.info(
-           f"{self.log_prefix} 使用模型配置: {selected_model_config.get('model_name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
+           f"{self.log_prefix} 使用模型配置: {selected_model_config.get('name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
        )
        express_model = LLMRequest(
@@ -271,20 +235,29 @@
            traceback.print_exc()
            return False, None

-   async def rewrite_reply_with_context(self, reply_data: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
+   async def rewrite_reply_with_context(
+       self,
+       reply_data: Dict[str, Any],
+       raw_reply: str = "",
+       reason: str = "",
+       reply_to: str = "",
+       relation_info: str = "",
+   ) -> Tuple[bool, Optional[str]]:
        """
        表达器 (Expressor): 核心逻辑,负责生成回复文本。
        """
        try:
-           reply_to = reply_data.get("reply_to", "")
-           raw_reply = reply_data.get("raw_reply", "")
-           reason = reply_data.get("reason", "")
+           if not reply_data:
+               reply_data = {
+                   "reply_to": reply_to,
+                   "relation_info": relation_info,
+               }

            with Timer("构建Prompt", {}):  # 内部计时器,可选保留
                prompt = await self.build_prompt_rewrite_context(
                    raw_reply=raw_reply,
                    reason=reason,
-                   reply_to=reply_to,
+                   reply_data=reply_data,
                )

            content = None
@@ -309,8 +282,7 @@
                content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)

-               logger.info(f"想要表达:{raw_reply}||理由:{reason}")
-               logger.info(f"最终回复: {content}\n")
+               logger.info(f"想要表达:{raw_reply}||理由:{reason}||生成回复: {content}\n")

        except Exception as llm_e:
            # 精简报错信息
@@ -325,6 +297,9 @@
            return False, None

    async def build_relation_info(self, reply_data=None, chat_history=None):
+       if not global_config.relationship.enable_relationship:
+           return ""
+
        relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
        if not reply_data:
            return ""
@@ -344,6 +319,9 @@
        return relation_info

    async def build_expression_habits(self, chat_history, target):
+       if not global_config.expression.enable_expression:
+           return ""
+
        style_habbits = []
        grammar_habbits = []
@@ -379,8 +357,11 @@
        return expression_habits_block

    async def build_memory_block(self, chat_history, target):
+       if not global_config.memory.enable_memory:
+           return ""
+
        running_memorys = await self.memory_activator.activate_memory_with_chat_history(
-           chat_id=self.chat_stream.stream_id, target_message=target, chat_history_prompt=chat_history
+           target_message=target, chat_history_prompt=chat_history
        )

        if running_memorys:
@@ -394,6 +375,52 @@
        return memory_block

+   async def build_tool_info(self, reply_data=None, chat_history=None):
+       """构建工具信息块
+
+       Args:
+           reply_data: 回复数据,包含要回复的消息内容
+           chat_history: 聊天历史
+
+       Returns:
+           str: 工具信息字符串
+       """
+       if not reply_data:
+           return ""
+
+       reply_to = reply_data.get("reply_to", "")
+       sender, text = self._parse_reply_target(reply_to)
+       if not text:
+           return ""
+
+       try:
+           # 使用工具执行器获取信息
+           tool_results = await self.tool_executor.execute_from_chat_message(
+               sender=sender, target_message=text, chat_history=chat_history, return_details=False
+           )
+
+           if tool_results:
+               tool_info_str = "以下是你通过工具获取到的实时信息:\n"
+               for tool_result in tool_results:
+                   tool_name = tool_result.get("tool_name", "unknown")
+                   content = tool_result.get("content", "")
+                   result_type = tool_result.get("type", "info")
+                   tool_info_str += f"- 【{tool_name}】{result_type}: {content}\n"
+               tool_info_str += "以上是你获取到的实时信息,请在回复时参考这些信息。"
+               logger.info(f"{self.log_prefix} 获取到 {len(tool_results)} 个工具结果")
+               return tool_info_str
+           else:
+               logger.debug(f"{self.log_prefix} 未获取到任何工具结果")
+               return ""
+       except Exception as e:
+           logger.error(f"{self.log_prefix} 工具信息获取失败: {e}")
+           return ""
+
    def _parse_reply_target(self, target_message: str) -> tuple:
        sender = ""
        target = ""
@@ -457,8 +484,6 @@
        person_info_manager = get_person_info_manager()
        bot_person_id = person_info_manager.get_person_id("system", "bot_id")
        is_group_chat = bool(chat_stream.group_info)
-
-       structured_info = reply_data.get("structured_info", "")
        reply_to = reply_data.get("reply_to", "none")

        extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")
@@ -502,21 +527,22 @@
            show_actions=True,
        )

        # 并行执行个构建任务
-       expression_habits_block, relation_info, memory_block = await asyncio.gather(
+       expression_habits_block, relation_info, memory_block, tool_info = await asyncio.gather(
            self.build_expression_habits(chat_talking_prompt_half, target),
            self.build_relation_info(reply_data, chat_talking_prompt_half),
            self.build_memory_block(chat_talking_prompt_half, target),
+           self.build_tool_info(reply_data, chat_talking_prompt_half),
        )

        keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)

-       if structured_info:
-           structured_info_block = (
-               f"以下是你了解的额外信息信息,现在请你阅读以下内容,进行决策\n{structured_info}\n以上是一些额外的信息。"
-           )
+       if tool_info:
+           tool_info_block = (
+               f"以下是你了解的额外信息信息,现在请你阅读以下内容,进行决策\n{tool_info}\n以上是一些额外的信息。"
+           )
        else:
-           structured_info_block = ""
+           tool_info_block = ""

        if extra_info_block:
            extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
@@ -555,20 +581,25 @@
            "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。"
        )

-       if is_group_chat:
-           if sender:
-               reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
-           elif target:
-               reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
-           else:
-               reply_target_block = "现在,你想要在群里发言或者回复消息。"
-       else:  # private chat
-           if sender:
-               reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复"
-           elif target:
-               reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
-           else:
-               reply_target_block = "现在,你想要回复。"
+       if sender and target:
+           if is_group_chat:
+               if sender:
+                   reply_target_block = (
+                       f"现在{sender}说的:{target},引起了你的注意,你想要在群里发言或者回复这条消息。"
+                   )
+               elif target:
+                   reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
+               else:
+                   reply_target_block = "现在,你想要在群里发言或者回复消息"
+           else:  # private chat
+               if sender:
+                   reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。"
+               elif target:
+                   reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
+               else:
+                   reply_target_block = "现在,你想要回复。"
+       else:
+           reply_target_block = ""

        mood_prompt = mood_manager.get_mood_prompt()
@@ -576,173 +607,171 @@
        if prompt_info:
            prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)

-       # --- Choose template based on chat type ---
+       template_name = "default_generator_prompt"
        if is_group_chat:
-           template_name = "default_generator_prompt"
-           # Group specific formatting variables (already fetched or default)
            chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
            chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
-
-           prompt = await global_prompt_manager.format_prompt(
-               template_name,
-               expression_habits_block=expression_habits_block,
-               chat_target=chat_target_1,
-               chat_info=chat_talking_prompt,
-               memory_block=memory_block,
-               structured_info_block=structured_info_block,
-               extra_info_block=extra_info_block,
-               relation_info_block=relation_info,
-               time_block=time_block,
-               reply_target_block=reply_target_block,
-               moderation_prompt=moderation_prompt_block,
-               keywords_reaction_prompt=keywords_reaction_prompt,
-               identity=indentify_block,
-               target_message=target,
-               sender_name=sender,
-               config_expression_style=global_config.expression.expression_style,
-               action_descriptions=action_descriptions,
-               chat_target_2=chat_target_2,
-               mood_prompt=mood_prompt,
-           )
-       else:  # Private chat
-           template_name = "default_generator_private_prompt"
-           # 在私聊时获取对方的昵称信息
+       else:
            chat_target_name = "对方"
            if self.chat_target_info:
                chat_target_name = (
                    self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
                )
-           chat_target_1 = f"你正在和 {chat_target_name} 聊天"
-           prompt = await global_prompt_manager.format_prompt(
-               template_name,
-               expression_habits_block=expression_habits_block,
-               chat_target=chat_target_1,
-               chat_info=chat_talking_prompt,
-               memory_block=memory_block,
-               structured_info_block=structured_info_block,
-               relation_info_block=relation_info,
-               extra_info_block=extra_info_block,
-               time_block=time_block,
-               keywords_reaction_prompt=keywords_reaction_prompt,
-               identity=indentify_block,
-               target_message=target,
-               sender_name=sender,
-               config_expression_style=global_config.expression.expression_style,
-           )
+           chat_target_1 = await global_prompt_manager.format_prompt(
+               "chat_target_private1", sender_name=chat_target_name
+           )
+           chat_target_2 = await global_prompt_manager.format_prompt(
+               "chat_target_private2", sender_name=chat_target_name
+           )
+
+       prompt = await global_prompt_manager.format_prompt(
+           template_name,
+           expression_habits_block=expression_habits_block,
+           chat_target=chat_target_1,
+           chat_info=chat_talking_prompt,
+           memory_block=memory_block,
+           tool_info_block=tool_info_block,
+           knowledge_prompt=prompt_info,
+           extra_info_block=extra_info_block,
+           relation_info_block=relation_info,
+           time_block=time_block,
+           reply_target_block=reply_target_block,
+           moderation_prompt=moderation_prompt_block,
+           keywords_reaction_prompt=keywords_reaction_prompt,
+           identity=indentify_block,
+           target_message=target,
+           sender_name=sender,
+           config_expression_style=global_config.expression.expression_style,
+           action_descriptions=action_descriptions,
+           chat_target_2=chat_target_2,
+           mood_prompt=mood_prompt,
+       )

        return prompt
    async def build_prompt_rewrite_context(
        self,
-       reason,
-       raw_reply,
-       reply_to,
+       reply_data: Dict[str, Any],
+       raw_reply: str = "",
+       reason: str = "",
    ) -> str:
-       sender = ""
-       target = ""
-       if ":" in reply_to or ":" in reply_to:
-           # 使用正则表达式匹配中文或英文冒号
-           parts = re.split(pattern=r"[::]", string=reply_to, maxsplit=1)
-           if len(parts) == 2:
-               sender = parts[0].strip()
-               target = parts[1].strip()
-
        chat_stream = self.chat_stream
+       chat_id = chat_stream.stream_id
+       person_info_manager = get_person_info_manager()
+       bot_person_id = person_info_manager.get_person_id("system", "bot_id")
        is_group_chat = bool(chat_stream.group_info)

+       reply_to = reply_data.get("reply_to", "none")
+       sender, target = self._parse_reply_target(reply_to)
+
-       message_list_before_now = get_raw_msg_before_timestamp_with_chat(
-           chat_id=chat_stream.stream_id,
+       message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
+           chat_id=chat_id,
            timestamp=time.time(),
-           limit=global_config.chat.max_context_size,
+           limit=int(global_config.chat.max_context_size * 0.5),
        )
-       chat_talking_prompt = build_readable_messages(
-           message_list_before_now,
+       chat_talking_prompt_half = build_readable_messages(
+           message_list_before_now_half,
            replace_bot_name=True,
-           merge_messages=True,
+           merge_messages=False,
            timestamp_mode="relative",
            read_mark=0.0,
-           truncate=True,
+           show_actions=True,
        )

-       expression_learner = get_expression_learner()
-       (
-           learnt_style_expressions,
-           learnt_grammar_expressions,
-           personality_expressions,
-       ) = expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
-
-       style_habbits = []
-       grammar_habbits = []
-       # 1. learnt_expressions加权随机选3条
-       if learnt_style_expressions:
-           weights = [expr["count"] for expr in learnt_style_expressions]
-           selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
-           for expr in selected_learnt:
-               if isinstance(expr, dict) and "situation" in expr and "style" in expr:
-                   style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
-       # 2. learnt_grammar_expressions加权随机选3条
-       if learnt_grammar_expressions:
-           weights = [expr["count"] for expr in learnt_grammar_expressions]
-           selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
-           for expr in selected_learnt:
-               if isinstance(expr, dict) and "situation" in expr and "style" in expr:
-                   grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
-       # 3. personality_expressions随机选1条
-       if personality_expressions:
-           expr = random.choice(personality_expressions)
-           if isinstance(expr, dict) and "situation" in expr and "style" in expr:
-               style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
-
-       style_habbits_str = "\n".join(style_habbits)
-       grammar_habbits_str = "\n".join(grammar_habbits)
-
-       logger.debug("开始构建 focus prompt")
+       # 并行执行2个构建任务
+       expression_habits_block, relation_info = await asyncio.gather(
+           self.build_expression_habits(chat_talking_prompt_half, target),
+           self.build_relation_info(reply_data, chat_talking_prompt_half),
+       )
+
+       keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
+
+       time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
+
+       bot_name = global_config.bot.nickname
+       if global_config.bot.alias_names:
+           bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
+       else:
+           bot_nickname = ""
+
+       short_impression = await person_info_manager.get_value(bot_person_id, "short_impression")
+       try:
+           if isinstance(short_impression, str) and short_impression.strip():
+               short_impression = ast.literal_eval(short_impression)
+           elif not short_impression:
+               logger.warning("short_impression为空使用默认值")
+               short_impression = ["友好活泼", "人类"]
+       except (ValueError, SyntaxError) as e:
+           logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}")
+           short_impression = ["友好活泼", "人类"]
+       # 确保short_impression是列表格式且有足够的元素
+       if not isinstance(short_impression, list) or len(short_impression) < 2:
+           logger.warning(f"short_impression格式不正确: {short_impression}, 使用默认值")
+           short_impression = ["友好活泼", "人类"]
+       personality = short_impression[0]
+       identity = short_impression[1]
+       prompt_personality = personality + "" + identity
+       indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
+
+       moderation_prompt_block = (
+           "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。"
+       )
+
+       if sender and target:
+           if is_group_chat:
+               if sender:
+                   reply_target_block = (
+                       f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
+                   )
+               elif target:
+                   reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
+               else:
+                   reply_target_block = "现在,你想要在群里发言或者回复消息。"
+           else:  # private chat
+               if sender:
+                   reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。"
+               elif target:
+                   reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
+               else:
+                   reply_target_block = "现在,你想要回复。"
+       else:
+           reply_target_block = ""
+
+       mood_manager.get_mood_prompt()

-       # --- Choose template based on chat type ---
        if is_group_chat:
-           template_name = "default_expressor_prompt"
-           # Group specific formatting variables (already fetched or default)
            chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
-           # chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
-
-           prompt = await global_prompt_manager.format_prompt(
-               template_name,
-               style_habbits=style_habbits_str,
-               grammar_habbits=grammar_habbits_str,
-               chat_target=chat_target_1,
-               chat_info=chat_talking_prompt,
-               bot_name=global_config.bot.nickname,
-               prompt_personality="",
-               reason=reason,
-               raw_reply=raw_reply,
-               sender_name=sender,
-               target_message=target,
-               config_expression_style=global_config.expression.expression_style,
-           )
-       else:  # Private chat
-           template_name = "default_expressor_private_prompt"
-           # 在私聊时获取对方的昵称信息
+           chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
+       else:
            chat_target_name = "对方"
            if self.chat_target_info:
                chat_target_name = (
                    self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
                )
-           chat_target_1 = f"你正在和 {chat_target_name} 聊天"
-           prompt = await global_prompt_manager.format_prompt(
-               template_name,
-               style_habbits=style_habbits_str,
-               grammar_habbits=grammar_habbits_str,
-               chat_target=chat_target_1,
-               chat_info=chat_talking_prompt,
-               bot_name=global_config.bot.nickname,
-               prompt_personality="",
-               reason=reason,
-               raw_reply=raw_reply,
-               sender_name=sender,
-               target_message=target,
-               config_expression_style=global_config.expression.expression_style,
-           )
+           chat_target_1 = await global_prompt_manager.format_prompt(
+               "chat_target_private1", sender_name=chat_target_name
+           )
+           chat_target_2 = await global_prompt_manager.format_prompt(
+               "chat_target_private2", sender_name=chat_target_name
+           )
+
+       template_name = "default_expressor_prompt"
+       prompt = await global_prompt_manager.format_prompt(
+           template_name,
+           expression_habits_block=expression_habits_block,
+           relation_info_block=relation_info,
+           chat_target=chat_target_1,
+           time_block=time_block,
+           chat_info=chat_talking_prompt_half,
+           identity=indentify_block,
+           chat_target_2=chat_target_2,
+           reply_target_block=reply_target_block,
+           raw_reply=raw_reply,
+           reason=reason,
+           config_expression_style=global_config.expression.expression_style,
+           keywords_reaction_prompt=keywords_reaction_prompt,
+           moderation_prompt=moderation_prompt_block,
+       )

        return prompt
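
Both builders follow the same two-step pattern from `prompt_builder`: register a named template once with `Prompt(text, name)` at import time, then render it later with `format_prompt(name, **kwargs)`. A minimal synchronous stand-in for that pair (the real `global_prompt_manager` is async and richer):

```python
# Hypothetical stand-in for the Prompt / format_prompt pair used above.
_TEMPLATES: dict[str, str] = {}

def register_prompt(text: str, name: str) -> None:
    _TEMPLATES[name] = text

def format_prompt(name: str, **kwargs: str) -> str:
    return _TEMPLATES[name].format(**kwargs)

register_prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
print(format_prompt("chat_target_private1", sender_name="Alice"))
```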

View File

@@ -14,6 +14,7 @@ class ReplyerManager:
        self,
        chat_stream: Optional[ChatStream] = None,
        chat_id: Optional[str] = None,
+       enable_tool: bool = False,
        model_configs: Optional[List[Dict[str, Any]]] = None,
        request_type: str = "replyer",
    ) -> Optional[DefaultReplyer]:
@@ -49,6 +50,7 @@
            # model_configs 只在此时(初始化时)生效
            replyer = DefaultReplyer(
                chat_stream=target_stream,
+               enable_tool=enable_tool,
                model_configs=model_configs,  # 可以是None此时使用默认模型
                request_type=request_type,
            )

View File

@@ -174,6 +174,7 @@ def _build_readable_messages_internal(
    truncate: bool = False,
    pic_id_mapping: Dict[str, str] = None,
    pic_counter: int = 1,
+   show_pic: bool = True,
) -> Tuple[str, List[Tuple[float, str, str]], Dict[str, str], int]:
    """
    内部辅助函数,构建可读消息字符串和原始消息详情列表。
@@ -260,7 +261,8 @@
        content = content.replace("", "")

        # 处理图片ID
-       content = process_pic_ids(content)
+       if show_pic:
+           content = process_pic_ids(content)

        # 检查必要信息是否存在
        if not all([platform, user_id, timestamp is not None]):
@@ -532,6 +534,7 @@ def build_readable_messages(
    read_mark: float = 0.0,
    truncate: bool = False,
    show_actions: bool = False,
+   show_pic: bool = True,
) -> str:
    """
    将消息列表转换为可读的文本格式。
@@ -601,7 +604,7 @@
    if read_mark <= 0:
        # 没有有效的 read_mark直接格式化所有消息
        formatted_string, _, pic_id_mapping, _ = _build_readable_messages_internal(
-           copy_messages, replace_bot_name, merge_messages, timestamp_mode, truncate
+           copy_messages, replace_bot_name, merge_messages, timestamp_mode, truncate, show_pic=show_pic
        )

        # 生成图片映射信息并添加到最前面
@@ -628,9 +631,17 @@
            truncate,
            pic_id_mapping,
            pic_counter,
+           show_pic=show_pic,
        )
        formatted_after, _, pic_id_mapping, _ = _build_readable_messages_internal(
-           messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, False, pic_id_mapping, pic_counter
+           messages_after_mark,
+           replace_bot_name,
+           merge_messages,
+           timestamp_mode,
+           False,
+           pic_id_mapping,
+           pic_counter,
+           show_pic=show_pic,
        )

        read_mark_line = "\n--- 以上消息是你已经看过,请关注以下未读的新消息---\n"
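
The new `show_pic` flag simply gates the `process_pic_ids` rewrite, so callers that want the raw text (for example, prompt paths that cannot render picture placeholders) can opt out. A usage sketch, assuming a populated `messages` list from the database:

```python
messages: list[dict] = []  # assumption: message dicts already fetched for a chat

readable = build_readable_messages(
    messages,
    replace_bot_name=True,
    merge_messages=False,
    timestamp_mode="relative",
    show_pic=False,  # keep picture IDs untouched instead of mapping them
)
```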

View File

@@ -127,6 +127,8 @@ class Messages(BaseModel):
    chat_id = TextField(index=True)  # 对应的 ChatStreams stream_id

+   reply_to = TextField(null=True)
+
    # 从 chat_info 扁平化而来的字段
    chat_info_stream_id = TextField()
    chat_info_platform = TextField()

View File

@@ -30,11 +30,11 @@ from src.config.official_configs import (
    TelemetryConfig,
    ExperimentalConfig,
    ModelConfig,
-   FocusChatProcessorConfig,
    MessageReceiveConfig,
    MaimMessageConfig,
    LPMMKnowledgeConfig,
    RelationshipConfig,
+   ToolConfig,
)

install(extra_lines=3)

@@ -50,7 +50,7 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/
-MMC_VERSION = "0.8.0"
+MMC_VERSION = "0.8.1-snapshot.1"


def update_config():

@@ -151,7 +151,6 @@ class Config(ConfigBase):
    message_receive: MessageReceiveConfig
    normal_chat: NormalChatConfig
    focus_chat: FocusChatConfig
-   focus_chat_processor: FocusChatProcessorConfig
    emoji: EmojiConfig
    expression: ExpressionConfig
    memory: MemoryConfig
@@ -165,6 +164,7 @@
    model: ModelConfig
    maim_message: MaimMessageConfig
    lpmm_knowledge: LPMMKnowledgeConfig
+   tool: ToolConfig


def load_config(config_path: str) -> Config:

View File

@@ -78,6 +78,12 @@ class ChatConfig(ConfigBase):
    max_context_size: int = 18
    """上下文长度"""

+   replyer_random_probability: float = 0.5
+   """
+   发言时选择推理模型的概率0-1之间
+   选择普通模型的概率为 1 - reasoning_normal_model_probability
+   """
+
    talk_frequency: float = 1
    """回复频率阈值"""

@@ -264,12 +270,6 @@ class MessageReceiveConfig(ConfigBase):
class NormalChatConfig(ConfigBase):
    """普通聊天配置类"""

-   normal_chat_first_probability: float = 0.3
-   """
-   发言时选择推理模型的概率0-1之间
-   选择普通模型的概率为 1 - reasoning_normal_model_probability
-   """
-
    message_buffer: bool = False
    """消息缓冲器"""

@@ -314,15 +314,7 @@ class FocusChatConfig(ConfigBase):
    consecutive_replies: float = 1
    """连续回复能力,值越高,麦麦连续回复的概率越高"""

-
-@dataclass
-class FocusChatProcessorConfig(ConfigBase):
-    """专注聊天处理器配置类"""
-
-    tool_use_processor: bool = True
-    """是否启用工具使用处理器"""
-
-    working_memory_processor: bool = True
+   working_memory_processor: bool = False
    """是否启用工作记忆处理器"""

@@ -330,6 +322,9 @@
class ExpressionConfig(ConfigBase):
    """表达配置类"""

+   enable_expression: bool = True
+   """是否启用表达方式"""
+
    expression_style: str = ""
    """表达风格"""

@@ -346,6 +341,17 @@
    """

+
+@dataclass
+class ToolConfig(ConfigBase):
+    """工具配置类"""
+
+    enable_in_normal_chat: bool = False
+    """是否在普通聊天中启用工具"""
+
+    enable_in_focus_chat: bool = True
+    """是否在专注聊天中启用工具"""
+
+
@dataclass
class EmojiConfig(ConfigBase):
    """表情包配置类"""

@@ -644,7 +650,7 @@ class ModelConfig(ConfigBase):
    focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
    """专注工作记忆模型配置"""

-   focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
+   tool_use: dict[str, Any] = field(default_factory=lambda: {})
    """专注工具使用模型配置"""

    planner: dict[str, Any] = field(default_factory=lambda: {})
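
With `ToolConfig` in place, call sites can branch on two plain booleans rather than on the old processor switches. A sketch of the intended gating (field names as declared above; the `ToolConfigSketch` dataclass and `tool_enabled` helper are illustrative):

```python
from dataclasses import dataclass

@dataclass
class ToolConfigSketch:  # mirrors the two fields declared above
    enable_in_normal_chat: bool = False
    enable_in_focus_chat: bool = True

def tool_enabled(chat_mode: str, cfg: ToolConfigSketch) -> bool:
    if chat_mode == "normal":
        return cfg.enable_in_normal_chat
    if chat_mode == "focus":
        return cfg.enable_in_focus_chat
    return False

assert tool_enabled("focus", ToolConfigSketch()) is True
assert tool_enabled("normal", ToolConfigSketch()) is False
```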

View File

@@ -1,238 +0,0 @@
import random
from src.common.logger import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from typing import List, Tuple
import os
import json
from datetime import datetime
logger = get_logger("expressor")
def init_prompt() -> None:
personality_expression_prompt = """
你的人物设定:{personality}
你说话的表达方式:{expression_style}
请从以上表达方式中总结出这个角色可能的语言风格,你必须严格根据人设引申,不要输出例子
思考回复的特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
"xxx"时,可以"xxx", xxx不超过10个字
例如(不要输出例子):
"表示十分惊叹"时,使用"我嘞个xxxx"
"表示讽刺的赞同,不想讲道理"时,使用"对对对"
"想说明某个观点,但懒得明说",使用"懂的都懂"
现在请你概括
"""
Prompt(personality_expression_prompt, "personality_expression_prompt")
class PersonalityExpression:
def __init__(self):
self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.model.replyer_1,
max_tokens=512,
request_type="expressor.learner",
)
self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json")
self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json")
self.max_calculations = 20
def _read_meta_data(self):
if os.path.exists(self.meta_file_path):
try:
with open(self.meta_file_path, "r", encoding="utf-8") as meta_file:
meta_data = json.load(meta_file)
# 检查是否有last_update_time字段
if "last_update_time" not in meta_data:
logger.warning(f"{self.meta_file_path} 中缺少last_update_time字段将重新开始。")
# 清空并重写元数据文件
self._write_meta_data({"last_style_text": None, "count": 0, "last_update_time": None})
# 清空并重写表达文件
if os.path.exists(self.expressions_file_path):
with open(self.expressions_file_path, "w", encoding="utf-8") as expressions_file:
json.dump([], expressions_file, ensure_ascii=False, indent=2)
logger.debug(f"已清空表达文件: {self.expressions_file_path}")
return {"last_style_text": None, "count": 0, "last_update_time": None}
return meta_data
except json.JSONDecodeError:
logger.warning(f"无法解析 {self.meta_file_path} 中的JSON数据将重新开始。")
# 清空并重写元数据文件
self._write_meta_data({"last_style_text": None, "count": 0, "last_update_time": None})
# 清空并重写表达文件
if os.path.exists(self.expressions_file_path):
with open(self.expressions_file_path, "w", encoding="utf-8") as expressions_file:
json.dump([], expressions_file, ensure_ascii=False, indent=2)
logger.debug(f"已清空表达文件: {self.expressions_file_path}")
return {"last_style_text": None, "count": 0, "last_update_time": None}
return {"last_style_text": None, "count": 0, "last_update_time": None}
def _write_meta_data(self, data):
os.makedirs(os.path.dirname(self.meta_file_path), exist_ok=True)
with open(self.meta_file_path, "w", encoding="utf-8") as meta_file:
json.dump(data, meta_file, ensure_ascii=False, indent=2)
async def extract_and_store_personality_expressions(self):
"""
检查data/expression/personality目录不存在则创建。
用peronality变量作为chat_str调用LLM生成表达风格解析后count=100存储到expressions.json。
如果expression_style、personality或identity发生变化则删除旧的expressions.json并重置计数。
对于相同的expression_style最多计算self.max_calculations次。
"""
os.makedirs(os.path.dirname(self.expressions_file_path), exist_ok=True)
current_style_text = global_config.expression.expression_style
current_personality = global_config.personality.personality_core
meta_data = self._read_meta_data()
last_style_text = meta_data.get("last_style_text")
last_personality = meta_data.get("last_personality")
count = meta_data.get("count", 0)
# 检查是否有任何变化
if current_style_text != last_style_text or current_personality != last_personality:
logger.info(
f"检测到变化:\n风格: '{last_style_text}' -> '{current_style_text}'\n人格: '{last_personality}' -> '{current_personality}'"
)
count = 0
if os.path.exists(self.expressions_file_path):
try:
os.remove(self.expressions_file_path)
logger.info(f"已删除旧的表达文件: {self.expressions_file_path}")
except OSError as e:
logger.error(f"删除旧的表达文件 {self.expressions_file_path} 失败: {e}")
if count >= self.max_calculations:
logger.debug(f"对于当前配置已达到最大计算次数 ({self.max_calculations})。跳过提取。")
# 即使跳过,也更新元数据以反映当前配置已被识别且计数已满
self._write_meta_data(
{
"last_style_text": current_style_text,
"last_personality": current_personality,
"count": count,
"last_update_time": meta_data.get("last_update_time"),
}
)
return
# 构建prompt
prompt = await global_prompt_manager.format_prompt(
"personality_expression_prompt",
personality=current_personality,
expression_style=current_style_text,
)
try:
response, _ = await self.express_learn_model.generate_response_async(prompt)
except Exception as e:
logger.error(f"个性表达方式提取失败: {e}")
# 如果提取失败,保存当前的配置和未增加的计数
self._write_meta_data(
{
"last_style_text": current_style_text,
"last_personality": current_personality,
"count": count,
"last_update_time": meta_data.get("last_update_time"),
}
)
return
logger.info(f"个性表达方式提取response: {response}")
# 转为dict并count=100
if response != "":
expressions = self.parse_expression_response(response, "personality")
# 读取已有的表达方式
existing_expressions = []
if os.path.exists(self.expressions_file_path):
try:
with open(self.expressions_file_path, "r", encoding="utf-8") as f:
existing_expressions = json.load(f)
except (json.JSONDecodeError, FileNotFoundError):
logger.warning(f"无法读取或解析 {self.expressions_file_path},将创建新的表达文件。")
# 创建新的表达方式
new_expressions = []
for _, situation, style in expressions:
new_expressions.append({"situation": situation, "style": style, "count": 1})
# 合并表达方式如果situation和style相同则累加count
merged_expressions = existing_expressions.copy()
for new_expr in new_expressions:
found = False
for existing_expr in merged_expressions:
if (
existing_expr["situation"] == new_expr["situation"]
and existing_expr["style"] == new_expr["style"]
):
existing_expr["count"] += new_expr["count"]
found = True
break
if not found:
merged_expressions.append(new_expr)
# 超过50条时随机删除多余的只保留50条
if len(merged_expressions) > 50:
remove_count = len(merged_expressions) - 50
remove_indices = set(random.sample(range(len(merged_expressions)), remove_count))
merged_expressions = [item for idx, item in enumerate(merged_expressions) if idx not in remove_indices]
with open(self.expressions_file_path, "w", encoding="utf-8") as f:
json.dump(merged_expressions, f, ensure_ascii=False, indent=2)
logger.info(f"已写入{len(merged_expressions)}条表达到{self.expressions_file_path}")
# 成功提取后更新元数据
count += 1
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._write_meta_data(
{
"last_style_text": current_style_text,
"last_personality": current_personality,
"count": count,
"last_update_time": current_time,
}
)
logger.info(f"成功处理。当前配置的计数现在是 {count},最后更新时间:{current_time}")
else:
logger.warning(f"个性表达方式提取失败,模型返回空内容: {response}")
def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]:
"""
解析LLM返回的表达风格总结每一行提取"""使用"之间的内容,存储为(situation, style)元组
"""
expressions: List[Tuple[str, str, str]] = []
for line in response.splitlines():
line = line.strip()
if not line:
continue
# 查找"当"和下一个引号
idx_when = line.find('"')
if idx_when == -1:
continue
idx_quote1 = idx_when + 1
idx_quote2 = line.find('"', idx_quote1 + 1)
if idx_quote2 == -1:
continue
situation = line[idx_quote1 + 1 : idx_quote2]
# 查找"使用"
idx_use = line.find('使用"', idx_quote2)
if idx_use == -1:
continue
idx_quote3 = idx_use + 2
idx_quote4 = line.find('"', idx_quote3 + 1)
if idx_quote4 == -1:
continue
style = line[idx_quote3 + 1 : idx_quote4]
expressions.append((chat_id, situation, style))
return expressions
init_prompt()

View File

@@ -1,11 +1,9 @@
from typing import Optional
-import asyncio
import ast
from src.llm_models.utils_model import LLMRequest
from .personality import Personality
from .identity import Identity
-from .expression_style import PersonalityExpression
import random
import json
import os
@@ -27,7 +25,6 @@ class Individuality:
        # 正常初始化实例属性
        self.personality: Optional[Personality] = None
        self.identity: Optional[Identity] = None
-       self.express_style: PersonalityExpression = PersonalityExpression()
        self.name = ""
        self.bot_person_id = ""
@@ -151,8 +148,6 @@
        else:
            logger.error("人设构建失败")

-       asyncio.create_task(self.express_style.extract_and_store_personality_expressions())
-
    def to_dict(self) -> dict:
        """将个体特征转换为字典格式"""
        return {

View File

@@ -102,7 +102,8 @@ class LLMRequest:
            "o3",
            "o3-2025-04-16",
            "o3-mini",
-           "o3-mini-2025-01-31o4-mini",
+           "o3-mini-2025-01-31",
+           "o4-mini",
            "o4-mini-2025-04-16",
        ]

View File

@@ -19,7 +19,7 @@ from src.common.logger import get_logger
from src.individuality.individuality import get_individuality, Individuality
from src.common.server import get_global_server, Server
from rich.traceback import install
-from src.api.main import start_api_server
+# from src.api.main import start_api_server

# 导入新的插件管理器
from src.plugin_system.core.plugin_manager import plugin_manager
@@ -85,8 +85,8 @@ class MainSystem:
            await async_task_manager.add_task(TelemetryHeartBeatTask())

            # 启动API服务器
-           start_api_server()
-           logger.info("API服务器启动成功")
+           # start_api_server()
+           # logger.info("API服务器启动成功")

            # 加载所有actions包括默认的和插件的
            plugin_count, component_count = plugin_manager.load_all_plugins()
@@ -205,7 +205,7 @@
        expression_learner = get_expression_learner()
        while True:
            await asyncio.sleep(global_config.expression.learning_interval)
-           if global_config.expression.enable_expression_learning:
+           if global_config.expression.enable_expression_learning and global_config.expression.enable_expression:
                logger.info("[表达方式学习] 开始学习表达方式...")
                await expression_learner.learn_and_store_expression()
                logger.info("[表达方式学习] 表达方式学习完成")

View File

@@ -0,0 +1,380 @@
import asyncio
import time
import random
from typing import Optional, Dict, Tuple # 导入类型提示
from maim_message import UserInfo, Seg
from src.common.logger import get_logger
from src.chat.message_receive.chat_stream import ChatStream, get_chat_manager
from .s4u_stream_generator import S4UStreamGenerator
from src.chat.message_receive.message import MessageSending, MessageRecv
from src.config.config import global_config
from src.common.message.api import get_global_api
from src.chat.message_receive.storage import MessageStorage
logger = get_logger("S4U_chat")
class MessageSenderContainer:
"""一个简单的容器,用于按顺序发送消息并模拟打字效果。"""
def __init__(self, chat_stream: ChatStream, original_message: MessageRecv):
self.chat_stream = chat_stream
self.original_message = original_message
self.queue = asyncio.Queue()
self.storage = MessageStorage()
self._task: Optional[asyncio.Task] = None
self._paused_event = asyncio.Event()
self._paused_event.set() # 默认设置为非暂停状态
async def add_message(self, chunk: str):
"""向队列中添加一个消息块。"""
await self.queue.put(chunk)
async def close(self):
"""表示没有更多消息了,关闭队列。"""
await self.queue.put(None) # Sentinel
def pause(self):
"""暂停发送。"""
self._paused_event.clear()
def resume(self):
"""恢复发送。"""
self._paused_event.set()
def _calculate_typing_delay(self, text: str) -> float:
"""根据文本长度计算模拟打字延迟。"""
chars_per_second = 15.0
min_delay = 0.2
max_delay = 2.0
delay = len(text) / chars_per_second
return max(min_delay, min(delay, max_delay))
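
`_calculate_typing_delay` clamps `len(text) / 15` into the [0.2, 2.0] second range, so a 3-character chunk waits the 0.2 s floor and anything at or over 30 characters caps at 2 s. (Note that `_send_worker` below currently bypasses it with a fixed 0.1 s delay.) A standalone check of the same formula:

```python
def typing_delay(text: str, cps: float = 15.0, lo: float = 0.2, hi: float = 2.0) -> float:
    # same clamp as _calculate_typing_delay: length-based delay bounded to [lo, hi]
    return max(lo, min(len(text) / cps, hi))

assert typing_delay("abc") == 0.2        # short chunks hit the floor
assert typing_delay("x" * 30) == 2.0     # 30 / 15 = 2.0, exactly at the cap
assert typing_delay("x" * 300) == 2.0    # long chunks are clamped
```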
async def _send_worker(self):
"""从队列中取出消息并发送。"""
while True:
try:
# This structure ensures that task_done() is called for every item retrieved,
# even if the worker is cancelled while processing the item.
chunk = await self.queue.get()
except asyncio.CancelledError:
break
try:
if chunk is None:
break
# Check for pause signal *after* getting an item.
await self._paused_event.wait()
# delay = self._calculate_typing_delay(chunk)
delay = 0.1
await asyncio.sleep(delay)
current_time = time.time()
msg_id = f"{current_time}_{random.randint(1000, 9999)}"
text_to_send = chunk
if global_config.experimental.debug_show_chat_mode:
text_to_send += ""
message_segment = Seg(type="text", data=text_to_send)
bot_message = MessageSending(
message_id=msg_id,
chat_stream=self.chat_stream,
bot_user_info=UserInfo(
user_id=global_config.bot.qq_account,
user_nickname=global_config.bot.nickname,
platform=self.original_message.message_info.platform,
),
sender_info=self.original_message.message_info.user_info,
message_segment=message_segment,
reply=self.original_message,
is_emoji=False,
apply_set_reply_logic=True,
reply_to=f"{self.original_message.message_info.user_info.platform}:{self.original_message.message_info.user_info.user_id}",
)
await bot_message.process()
await get_global_api().send_message(bot_message)
logger.info(f"已将消息 '{text_to_send}' 发往平台 '{bot_message.message_info.platform}'")
await self.storage.store_message(bot_message, self.chat_stream)
except Exception as e:
logger.error(f"[{self.chat_stream.get_stream_name()}] 消息发送或存储时出现错误: {e}", exc_info=True)
finally:
# CRUCIAL: Always call task_done() for any item that was successfully retrieved.
self.queue.task_done()
def start(self):
"""启动发送任务。"""
if self._task is None:
self._task = asyncio.create_task(self._send_worker())
async def join(self):
"""等待所有消息发送完毕。"""
if self._task:
await self._task
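
`MessageSenderContainer` uses the classic asyncio producer/consumer shape: an unbounded `Queue`, a `None` sentinel pushed by `close()`, a pause `Event`, and a `task_done()` in `finally` so the queue's accounting never leaks even on cancellation. Stripped to its skeleton (hypothetical names, no pause handling):

```python
import asyncio

async def worker(q: asyncio.Queue) -> None:
    while True:
        item = await q.get()
        try:
            if item is None:      # sentinel: producer is done
                break
            print("send", item)
        finally:
            q.task_done()         # always account for the retrieved item

async def main() -> None:
    q: asyncio.Queue = asyncio.Queue()
    t = asyncio.create_task(worker(q))
    for chunk in ("你好", ",", "世界"):
        await q.put(chunk)
    await q.put(None)             # close the stream
    await t                       # wait for the worker to drain and exit

asyncio.run(main())
```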
class S4UChatManager:
def __init__(self):
self.s4u_chats: Dict[str, "S4UChat"] = {}
def get_or_create_chat(self, chat_stream: ChatStream) -> "S4UChat":
if chat_stream.stream_id not in self.s4u_chats:
stream_name = get_chat_manager().get_stream_name(chat_stream.stream_id) or chat_stream.stream_id
logger.info(f"Creating new S4UChat for stream: {stream_name}")
self.s4u_chats[chat_stream.stream_id] = S4UChat(chat_stream)
return self.s4u_chats[chat_stream.stream_id]
s4u_chat_manager = S4UChatManager()
def get_s4u_chat_manager() -> S4UChatManager:
return s4u_chat_manager
class S4UChat:
_MESSAGE_TIMEOUT_SECONDS = 60 # 普通消息存活时间(秒)
def __init__(self, chat_stream: ChatStream):
"""初始化 S4UChat 实例。"""
self.chat_stream = chat_stream
self.stream_id = chat_stream.stream_id
self.stream_name = get_chat_manager().get_stream_name(self.stream_id) or self.stream_id
# 两个消息队列
self._vip_queue = asyncio.PriorityQueue()
self._normal_queue = asyncio.PriorityQueue()
self._entry_counter = 0 # 保证FIFO的全局计数器
self._new_message_event = asyncio.Event() # 用于唤醒处理器
self._processing_task = asyncio.create_task(self._message_processor())
self._current_generation_task: Optional[asyncio.Task] = None
# 当前消息的元数据:(队列类型, 优先级分数, 计数器, 消息对象)
self._current_message_being_replied: Optional[Tuple[str, float, int, MessageRecv]] = None
self._is_replying = False
self.gpt = S4UStreamGenerator()
self.interest_dict: Dict[str, float] = {} # 用户兴趣分
self.at_bot_priority_bonus = 100.0 # @机器人的优先级加成
self.normal_queue_max_size = 50 # 普通队列最大容量
logger.info(f"[{self.stream_name}] S4UChat with two-queue system initialized.")
    def _is_vip(self, message: MessageRecv) -> bool:
        """检查消息是否来自VIP用户。"""
        # 您需要修改此处或在配置文件中定义VIP用户例如 ["1026294844"];留空字符串则相当于禁用VIP
        vip_user_ids = [""]
        return message.message_info.user_info.user_id in vip_user_ids
def _get_interest_score(self, user_id: str) -> float:
"""获取用户的兴趣分默认为1.0"""
return self.interest_dict.get(user_id, 1.0)
def _calculate_base_priority_score(self, message: MessageRecv) -> float:
"""
为消息计算基础优先级分数。分数越高,优先级越高。
"""
score = 0.0
# 如果消息 @ 了机器人,则增加一个很大的分数
if f"@{global_config.bot.nickname}" in message.processed_plain_text or any(
f"@{alias}" in message.processed_plain_text for alias in global_config.bot.alias_names
):
score += self.at_bot_priority_bonus
# 加上用户的固有兴趣分
score += self._get_interest_score(message.message_info.user_info.user_id)
return score
async def add_message(self, message: MessageRecv) -> None:
"""根据VIP状态和中断逻辑将消息放入相应队列。"""
is_vip = self._is_vip(message)
new_priority_score = self._calculate_base_priority_score(message)
should_interrupt = False
if self._current_generation_task and not self._current_generation_task.done():
if self._current_message_being_replied:
current_queue, current_priority, _, current_msg = self._current_message_being_replied
# 规则VIP从不被打断
if current_queue == "vip":
pass # Do nothing
# 规则:普通消息可以被打断
elif current_queue == "normal":
# VIP消息可以打断普通消息
if is_vip:
should_interrupt = True
logger.info(f"[{self.stream_name}] VIP message received, interrupting current normal task.")
# 普通消息的内部打断逻辑
else:
new_sender_id = message.message_info.user_info.user_id
current_sender_id = current_msg.message_info.user_info.user_id
# 新消息优先级更高
if new_priority_score > current_priority:
should_interrupt = True
logger.info(f"[{self.stream_name}] New normal message has higher priority, interrupting.")
# 同用户,新消息的优先级不能更低
elif new_sender_id == current_sender_id and new_priority_score >= current_priority:
should_interrupt = True
logger.info(f"[{self.stream_name}] Same user sent new message, interrupting.")
if should_interrupt:
if self.gpt.partial_response:
logger.warning(
f"[{self.stream_name}] Interrupting reply. Already generated: '{self.gpt.partial_response}'"
)
self._current_generation_task.cancel()
# asyncio.PriorityQueue 是最小堆,所以我们存入分数的相反数
# 这样,原始分数越高的消息,在队列中的优先级数字越小,越靠前
item = (-new_priority_score, self._entry_counter, time.time(), message)
if is_vip:
await self._vip_queue.put(item)
logger.info(f"[{self.stream_name}] VIP message added to queue.")
else:
# 应用普通队列的最大容量限制
if self._normal_queue.qsize() >= self.normal_queue_max_size:
# 队列已满,简单忽略新消息
# 更复杂的逻辑(如替换掉队列中优先级最低的)对于 asyncio.PriorityQueue 来说实现复杂
logger.debug(
f"[{self.stream_name}] Normal queue is full, ignoring new message from {message.message_info.user_info.user_id}"
)
return
await self._normal_queue.put(item)
self._entry_counter += 1
self._new_message_event.set() # 唤醒处理器
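    # Worked example of the negation trick above (illustrative, not repo code).
    # asyncio.PriorityQueue pops the smallest tuple first, so storing
    # (-score, counter, ...) makes higher scores leave the queue first, with
    # the monotonically increasing counter breaking ties FIFO:
    #   q = asyncio.PriorityQueue()
    #   q.put_nowait((-101.0, 0, "at-bot message"))   # score 101.0
    #   q.put_nowait((-1.0, 1, "plain message"))      # score 1.0
    #   q.put_nowait((-101.0, 2, "later at-bot"))     # same score, arrived later
    #   assert q.get_nowait()[2] == "at-bot message"
    #   assert q.get_nowait()[2] == "later at-bot"
    #   assert q.get_nowait()[2] == "plain message"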
async def _message_processor(self):
"""调度器优先处理VIP队列然后处理普通队列。"""
while True:
try:
# 等待有新消息的信号,避免空转
await self._new_message_event.wait()
self._new_message_event.clear()
# 优先处理VIP队列
if not self._vip_queue.empty():
neg_priority, entry_count, _, message = self._vip_queue.get_nowait()
priority = -neg_priority
queue_name = "vip"
# 其次处理普通队列
elif not self._normal_queue.empty():
neg_priority, entry_count, timestamp, message = self._normal_queue.get_nowait()
priority = -neg_priority
# 检查普通消息是否超时
if time.time() - timestamp > self._MESSAGE_TIMEOUT_SECONDS:
logger.info(
f"[{self.stream_name}] Discarding stale normal message: {message.processed_plain_text[:20]}..."
)
self._normal_queue.task_done()
continue # 处理下一条
queue_name = "normal"
else:
continue # 没有消息了,回去等事件
self._current_message_being_replied = (queue_name, priority, entry_count, message)
self._current_generation_task = asyncio.create_task(self._generate_and_send(message))
try:
await self._current_generation_task
except asyncio.CancelledError:
logger.info(
f"[{self.stream_name}] Reply generation was interrupted externally for {queue_name} message. The message will be discarded."
)
# 被中断的消息应该被丢弃,而不是重新排队,以响应最新的用户输入。
# 旧的重新入队逻辑会导致所有中断的消息最终都被回复。
except Exception as e:
logger.error(f"[{self.stream_name}] _generate_and_send task error: {e}", exc_info=True)
finally:
self._current_generation_task = None
self._current_message_being_replied = None
# 标记任务完成
if queue_name == "vip":
self._vip_queue.task_done()
else:
self._normal_queue.task_done()
# 检查是否还有任务,有则立即再次触发事件
if not self._vip_queue.empty() or not self._normal_queue.empty():
self._new_message_event.set()
except asyncio.CancelledError:
logger.info(f"[{self.stream_name}] Message processor is shutting down.")
break
except Exception as e:
logger.error(f"[{self.stream_name}] Message processor main loop error: {e}", exc_info=True)
await asyncio.sleep(1)
async def _generate_and_send(self, message: MessageRecv):
"""为单个消息生成文本和音频回复。整个过程可以被中断。"""
self._is_replying = True
sender_container = MessageSenderContainer(self.chat_stream, message)
sender_container.start()
try:
logger.info(f"[S4U] 开始为消息生成文本和音频流: '{message.processed_plain_text[:30]}...'")
# 1. 逐句生成文本、发送并播放音频
gen = self.gpt.generate_response(message, "")
async for chunk in gen:
# 如果任务被取消await 会在此处引发 CancelledError
# a. 发送文本块
await sender_container.add_message(chunk)
# b. 为该文本块生成并播放音频
# if chunk.strip():
# audio_data = await self.audio_generator.generate(chunk)
# player = MockAudioPlayer(audio_data)
# await player.play()
# 等待所有文本消息发送完成
await sender_container.close()
await sender_container.join()
logger.info(f"[{self.stream_name}] 所有文本和音频块处理完毕。")
except asyncio.CancelledError:
logger.info(f"[{self.stream_name}] 回复流程(文本或音频)被中断。")
raise # 将取消异常向上传播
except Exception as e:
logger.error(f"[{self.stream_name}] 回复生成过程中出现错误: {e}", exc_info=True)
finally:
self._is_replying = False
# 确保发送器被妥善关闭(即使已关闭,再次调用也是安全的)
sender_container.resume()
if not sender_container._task.done():
await sender_container.close()
await sender_container.join()
logger.info(f"[{self.stream_name}] _generate_and_send 任务结束,资源已清理。")
async def shutdown(self):
"""平滑关闭处理任务。"""
logger.info(f"正在关闭 S4UChat: {self.stream_name}")
# 取消正在运行的任务
if self._current_generation_task and not self._current_generation_task.done():
self._current_generation_task.cancel()
if self._processing_task and not self._processing_task.done():
self._processing_task.cancel()
# 等待任务响应取消
try:
await self._processing_task
except asyncio.CancelledError:
logger.info(f"处理任务已成功取消: {self.stream_name}")

View File

@@ -0,0 +1,57 @@
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.storage import MessageStorage
from src.chat.message_receive.chat_stream import get_chat_manager
from src.common.logger import get_logger
from .s4u_chat import get_s4u_chat_manager
# from ..message_receive.message_buffer import message_buffer
logger = get_logger("chat")
class S4UMessageProcessor:
"""心流处理器,负责处理接收到的消息并计算兴趣度"""
def __init__(self):
"""初始化心流处理器,创建消息存储实例"""
self.storage = MessageStorage()
async def process_message(self, message: MessageRecv) -> None:
"""处理接收到的原始消息数据
主要流程:
1. 消息解析与初始化
2. 消息缓冲处理
3. 过滤检查
4. 兴趣度计算
5. 关系处理
Args:
            message: 解析后的消息对象MessageRecv
"""
target_user_id_list = ["1026294844", "964959351"]
# 1. 消息解析与初始化
groupinfo = message.message_info.group_info
userinfo = message.message_info.user_info
messageinfo = message.message_info
chat = await get_chat_manager().get_or_create_stream(
platform=messageinfo.platform,
user_info=userinfo,
group_info=groupinfo,
)
await self.storage.store_message(message, chat)
s4u_chat = get_s4u_chat_manager().get_or_create_chat(chat)
        # 目前目标用户与其他用户走相同的入队逻辑target_user_id_list 保留以便后续差异化处理
        await s4u_chat.add_message(message)
        # 日志记录
logger.info(f"[S4U]{userinfo.user_nickname}:{message.processed_plain_text}")

View File

@@ -0,0 +1,270 @@
from src.config.config import global_config
from src.common.logger import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
import time
from src.chat.utils.utils import get_recent_group_speaker
from src.chat.memory_system.Hippocampus import hippocampus_manager
import random
from datetime import datetime
import asyncio
import ast
from src.person_info.person_info import get_person_info_manager
from src.person_info.relationship_manager import get_relationship_manager
logger = get_logger("prompt")
def init_prompt():
Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
Prompt("在群里聊天", "chat_target_group2")
Prompt("{sender_name}私聊", "chat_target_private2")
Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
Prompt("\n关于你们的关系,你需要知道:\n{relation_info}\n", "relation_prompt")
Prompt("你回想起了一些事情:\n{memory_info}\n", "memory_prompt")
Prompt(
"""{identity_block}
{relation_info_block}
{memory_block}
你现在的主要任务是和 {sender_name} 聊天。同时,也有其他用户会参与你们的聊天,你可以参考他们的回复内容,但是你主要还是关注你和{sender_name}的聊天内容。
{background_dialogue_prompt}
--------------------------------
{time_block}
这是你和{sender_name}的对话,你们正在交流中:
{core_dialogue_prompt}
对方最新发送的内容:{message_txt}
回复可以简短一些。可以参考贴吧,知乎和微博的回复风格,回复不要浮夸,不要用夸张修辞,平淡一些。
不要输出多余内容(包括前后缀、冒号和引号、括号、表情包、at或@等)。只输出回复内容,现在{sender_name}正在等待你的回复。
你的回复风格不要浮夸,要有逻辑和条理。请继续回复{sender_name}。
你的发言:
""",
"s4u_prompt", # New template for private CHAT chat
)
class PromptBuilder:
def __init__(self):
self.prompt_built = ""
self.activate_messages = ""
async def build_identity_block(self) -> str:
person_info_manager = get_person_info_manager()
bot_person_id = person_info_manager.get_person_id("system", "bot_id")
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
short_impression = await person_info_manager.get_value(bot_person_id, "short_impression")
try:
if isinstance(short_impression, str) and short_impression.strip():
short_impression = ast.literal_eval(short_impression)
elif not short_impression:
logger.warning("short_impression为空使用默认值")
short_impression = ["友好活泼", "人类"]
except (ValueError, SyntaxError) as e:
logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}")
short_impression = ["友好活泼", "人类"]
if not isinstance(short_impression, list) or len(short_impression) < 2:
logger.warning(f"short_impression格式不正确: {short_impression}, 使用默认值")
short_impression = ["友好活泼", "人类"]
personality = short_impression[0]
identity = short_impression[1]
        prompt_personality = personality + "," + identity
return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
async def build_relation_info(self, chat_stream) -> str:
is_group_chat = bool(chat_stream.group_info)
who_chat_in_group = []
if is_group_chat:
who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
limit=global_config.chat.max_context_size,
)
elif chat_stream.user_info:
who_chat_in_group.append(
(chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
)
relation_prompt = ""
if global_config.relationship.enable_relationship and who_chat_in_group:
relationship_manager = get_relationship_manager()
relation_info_list = await asyncio.gather(
*[relationship_manager.build_relationship_info(person) for person in who_chat_in_group]
)
relation_info = "".join(relation_info_list)
if relation_info:
relation_prompt = await global_prompt_manager.format_prompt(
"relation_prompt", relation_info=relation_info
)
return relation_prompt
async def build_memory_block(self, text: str) -> str:
related_memory = await hippocampus_manager.get_memory_from_text(
text=text, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
)
related_memory_info = ""
if related_memory:
for memory in related_memory:
related_memory_info += memory[1]
return await global_prompt_manager.format_prompt("memory_prompt", memory_info=related_memory_info)
return ""
    def build_chat_history_prompts(self, chat_stream, message) -> tuple[str, str]:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=100,
)
talk_type = message.message_info.platform + ":" + message.chat_stream.user_info.user_id
core_dialogue_list = []
background_dialogue_list = []
bot_id = str(global_config.bot.qq_account)
target_user_id = str(message.chat_stream.user_info.user_id)
for msg_dict in message_list_before_now:
try:
msg_user_id = str(msg_dict.get("user_id"))
if msg_user_id == bot_id:
if msg_dict.get("reply_to") and talk_type == msg_dict.get("reply_to"):
core_dialogue_list.append(msg_dict)
else:
background_dialogue_list.append(msg_dict)
elif msg_user_id == target_user_id:
core_dialogue_list.append(msg_dict)
else:
background_dialogue_list.append(msg_dict)
except Exception as e:
logger.error(f"无法处理历史消息记录: {msg_dict}, 错误: {e}")
background_dialogue_prompt = ""
if background_dialogue_list:
latest_25_msgs = background_dialogue_list[-25:]
background_dialogue_prompt_str = build_readable_messages(
latest_25_msgs,
merge_messages=True,
timestamp_mode="normal_no_YMD",
show_pic=False,
)
background_dialogue_prompt = f"这是其他用户的发言:\n{background_dialogue_prompt_str}"
core_msg_str = ""
if core_dialogue_list:
core_dialogue_list = core_dialogue_list[-50:]
first_msg = core_dialogue_list[0]
start_speaking_user_id = first_msg.get("user_id")
if start_speaking_user_id == bot_id:
last_speaking_user_id = bot_id
msg_seg_str = "你的发言:\n"
else:
start_speaking_user_id = target_user_id
last_speaking_user_id = start_speaking_user_id
msg_seg_str = "对方的发言:\n"
msg_seg_str += f"{time.strftime('%H:%M:%S', time.localtime(first_msg.get('time')))}: {first_msg.get('processed_plain_text')}\n"
all_msg_seg_list = []
for msg in core_dialogue_list[1:]:
speaker = msg.get("user_id")
if speaker == last_speaking_user_id:
msg_seg_str += f"{time.strftime('%H:%M:%S', time.localtime(msg.get('time')))}: {msg.get('processed_plain_text')}\n"
else:
msg_seg_str = f"{msg_seg_str}\n"
all_msg_seg_list.append(msg_seg_str)
if speaker == bot_id:
msg_seg_str = "你的发言:\n"
else:
msg_seg_str = "对方的发言:\n"
msg_seg_str += f"{time.strftime('%H:%M:%S', time.localtime(msg.get('time')))}: {msg.get('processed_plain_text')}\n"
last_speaking_user_id = speaker
all_msg_seg_list.append(msg_seg_str)
for msg in all_msg_seg_list:
core_msg_str += msg
return core_msg_str, background_dialogue_prompt
async def build_prompt_normal(
self,
message,
chat_stream,
message_txt: str,
sender_name: str = "某人",
) -> str:
identity_block, relation_info_block, memory_block = await asyncio.gather(
self.build_identity_block(), self.build_relation_info(chat_stream), self.build_memory_block(message_txt)
)
core_dialogue_prompt, background_dialogue_prompt = self.build_chat_history_prompts(chat_stream, message)
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
template_name = "s4u_prompt"
prompt = await global_prompt_manager.format_prompt(
template_name,
identity_block=identity_block,
time_block=time_block,
relation_info_block=relation_info_block,
memory_block=memory_block,
sender_name=sender_name,
core_dialogue_prompt=core_dialogue_prompt,
background_dialogue_prompt=background_dialogue_prompt,
message_txt=message_txt,
)
return prompt
def weighted_sample_no_replacement(items, weights, k) -> list:
"""
加权且不放回地随机抽取k个元素。
参数:
items: 待抽取的元素列表
weights: 每个元素对应的权重与items等长且为正数
k: 需要抽取的元素个数
返回:
selected: 按权重加权且不重复抽取的k个元素组成的列表
如果items中的元素不足k就只会返回所有可用的元素
实现思路:
每次从当前池中按权重加权随机选出一个元素选中后将其从池中移除重复k次。
这样保证了:
1. count越大被选中概率越高
2. 不会重复选中同一个元素
"""
selected = []
pool = list(zip(items, weights))
for _ in range(min(k, len(pool))):
total = sum(w for _, w in pool)
r = random.uniform(0, total)
upto = 0
for idx, (item, weight) in enumerate(pool):
upto += weight
if upto >= r:
selected.append(item)
pool.pop(idx)
break
return selected
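# Usage sketch (illustrative, not repo code): with weights [5, 1, 1] the first
# draw returns "a" with probability 5/7; sampling is without replacement, so
# k=2 can never pick the same element twice:
#   random.seed(0)  # deterministic demo only
#   picked = weighted_sample_no_replacement(["a", "b", "c"], [5, 1, 1], k=2)
#   assert len(picked) == 2 and len(set(picked)) == 2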
init_prompt()
prompt_builder = PromptBuilder()

View File

@@ -0,0 +1,157 @@
import os
from typing import AsyncGenerator
from src.llm_models.utils_model import LLMRequest
from src.mais4u.openai_client import AsyncOpenAIClient
from src.config.config import global_config
from src.chat.message_receive.message import MessageRecv
from src.mais4u.mais4u_chat.s4u_prompt import prompt_builder
from src.common.logger import get_logger
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
import asyncio
import re
logger = get_logger("s4u_stream_generator")
class S4UStreamGenerator:
def __init__(self):
replyer_1_config = global_config.model.replyer_1
provider = replyer_1_config.get("provider")
if not provider:
logger.error("`replyer_1` 在配置文件中缺少 `provider` 字段")
raise ValueError("`replyer_1` 在配置文件中缺少 `provider` 字段")
api_key = os.environ.get(f"{provider.upper()}_KEY")
base_url = os.environ.get(f"{provider.upper()}_BASE_URL")
if not api_key:
logger.error(f"环境变量 {provider.upper()}_KEY 未设置")
raise ValueError(f"环境变量 {provider.upper()}_KEY 未设置")
self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url)
self.model_1_name = replyer_1_config.get("name")
if not self.model_1_name:
logger.error("`replyer_1` 在配置文件中缺少 `model_name` 字段")
raise ValueError("`replyer_1` 在配置文件中缺少 `model_name` 字段")
self.replyer_1_config = replyer_1_config
self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation")
self.current_model_name = "unknown model"
self.partial_response = ""
# 正则表达式用于按句子切分,同时处理各种标点和边缘情况
# 匹配常见的句子结束符,但会忽略引号内和数字中的标点
self.sentence_split_pattern = re.compile(
r'([^\s\w"\'([{]*["\'([{].*?["\'}\])][^\s\w"\'([{]*|' # 匹配被引号/括号包裹的内容
r'[^.。!?\n\r]+(?:[.。!?\n\r](?![\'"])|$))', # 匹配直到句子结束符
re.UNICODE | re.DOTALL,
)
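        # Splitting sketch (illustrative, not repo tests): feeding a buffered
        # chunk through the pattern above yields complete sentences and leaves
        # the unterminated tail for the next iteration of the streaming loop:
        #   gen = S4UStreamGenerator()
        #   parts = [m.group(0).strip()
        #            for m in gen.sentence_split_pattern.finditer("你好!今天怎么样?我在写代码")]
        #   # parts -> ["你好!", "今天怎么样?", "我在写代码"]; the last fragment
        #   # has no terminator, so _generate_response_with_model keeps it buffered.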
async def generate_response(
self, message: MessageRecv, previous_reply_context: str = ""
) -> AsyncGenerator[str, None]:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
self.partial_response = ""
current_client = self.client_1
self.current_model_name = self.model_1_name
person_id = PersonInfoManager.get_person_id(
message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
)
person_info_manager = get_person_info_manager()
person_name = await person_info_manager.get_value(person_id, "person_name")
if message.chat_stream.user_info.user_nickname:
sender_name = f"[{message.chat_stream.user_info.user_nickname}]你叫ta{person_name}"
else:
sender_name = f"用户({message.chat_stream.user_info.user_id})"
# 构建prompt
if previous_reply_context:
message_txt = f"""
你正在回复用户的消息,但中途被打断了。这是已有的对话上下文:
[你已经对上一条消息说的话]: {previous_reply_context}
---
[这是用户发来的新消息, 你需要结合上下文,对此进行回复]:
{message.processed_plain_text}
"""
else:
message_txt = message.processed_plain_text
prompt = await prompt_builder.build_prompt_normal(
message=message,
message_txt=message_txt,
sender_name=sender_name,
chat_stream=message.chat_stream,
)
logger.info(
f"{self.current_model_name}思考:{message_txt[:30] + '...' if len(message_txt) > 30 else message_txt}"
) # noqa: E501
extra_kwargs = {}
if self.replyer_1_config.get("enable_thinking") is not None:
extra_kwargs["enable_thinking"] = self.replyer_1_config.get("enable_thinking")
if self.replyer_1_config.get("thinking_budget") is not None:
extra_kwargs["thinking_budget"] = self.replyer_1_config.get("thinking_budget")
async for chunk in self._generate_response_with_model(
prompt, current_client, self.current_model_name, **extra_kwargs
):
yield chunk
async def _generate_response_with_model(
self,
prompt: str,
client: AsyncOpenAIClient,
model_name: str,
**kwargs,
) -> AsyncGenerator[str, None]:
        logger.debug(f"Prompt:\n{prompt}")
buffer = ""
delimiters = ",。!?,.!?\n\r" # For final trimming
punctuation_buffer = ""
async for content in client.get_stream_content(
messages=[{"role": "user", "content": prompt}], model=model_name, **kwargs
):
buffer += content
# 使用正则表达式匹配句子
last_match_end = 0
for match in self.sentence_split_pattern.finditer(buffer):
sentence = match.group(0).strip()
if sentence:
# 如果句子看起来完整(即不只是等待更多内容),则发送
if match.end(0) < len(buffer) or sentence.endswith(tuple(delimiters)):
# 检查是否只是一个标点符号
                        if sentence in [",", ",", ".", "。", "!", "!", "?", "?"]:
punctuation_buffer += sentence
else:
# 发送之前累积的标点和当前句子
to_yield = punctuation_buffer + sentence
                            if to_yield.endswith((",", ",")):
                                to_yield = to_yield.rstrip(",,")
self.partial_response += to_yield
yield to_yield
punctuation_buffer = "" # 清空标点符号缓冲区
await asyncio.sleep(0) # 允许其他任务运行
last_match_end = match.end(0)
# 从缓冲区移除已发送的部分
if last_match_end > 0:
buffer = buffer[last_match_end:]
# 发送缓冲区中剩余的任何内容
to_yield = (punctuation_buffer + buffer).strip()
if to_yield:
            if to_yield.endswith((",", ",")):
                to_yield = to_yield.rstrip(",,")
if to_yield:
self.partial_response += to_yield
yield to_yield
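# Consumption sketch (hypothetical driver, not part of this commit): sentences
# arrive incrementally; partial_response accumulates everything yielded so far,
# which is what S4UChat logs when an in-flight reply gets interrupted.
async def _demo_generate(message: MessageRecv):
    generator = S4UStreamGenerator()
    async for sentence in generator.generate_response(message):
        print(sentence, end="", flush=True)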

286
src/mais4u/openai_client.py Normal file
View File

@@ -0,0 +1,286 @@
from typing import AsyncGenerator, Dict, List, Optional, Union
from dataclasses import dataclass
from openai import AsyncOpenAI
from openai.types.chat import ChatCompletion, ChatCompletionChunk
@dataclass
class ChatMessage:
"""聊天消息数据类"""
role: str
content: str
def to_dict(self) -> Dict[str, str]:
return {"role": self.role, "content": self.content}
class AsyncOpenAIClient:
"""异步OpenAI客户端支持流式传输"""
def __init__(self, api_key: str, base_url: Optional[str] = None):
"""
初始化客户端
Args:
api_key: OpenAI API密钥
base_url: 可选的API基础URL用于自定义端点
"""
self.client = AsyncOpenAI(
api_key=api_key,
base_url=base_url,
            timeout=10.0,  # 全局超时10秒
)
async def chat_completion(
self,
messages: List[Union[ChatMessage, Dict[str, str]]],
model: str = "gpt-3.5-turbo",
temperature: float = 0.7,
max_tokens: Optional[int] = None,
**kwargs,
) -> ChatCompletion:
"""
非流式聊天完成
Args:
messages: 消息列表
model: 模型名称
temperature: 温度参数
max_tokens: 最大token数
**kwargs: 其他参数
Returns:
完整的聊天回复
"""
# 转换消息格式
formatted_messages = []
for msg in messages:
if isinstance(msg, ChatMessage):
formatted_messages.append(msg.to_dict())
else:
formatted_messages.append(msg)
extra_body = {}
if kwargs.get("enable_thinking") is not None:
extra_body["enable_thinking"] = kwargs.pop("enable_thinking")
if kwargs.get("thinking_budget") is not None:
extra_body["thinking_budget"] = kwargs.pop("thinking_budget")
response = await self.client.chat.completions.create(
model=model,
messages=formatted_messages,
temperature=temperature,
max_tokens=max_tokens,
stream=False,
extra_body=extra_body if extra_body else None,
**kwargs,
)
return response
async def chat_completion_stream(
self,
messages: List[Union[ChatMessage, Dict[str, str]]],
model: str = "gpt-3.5-turbo",
temperature: float = 0.7,
max_tokens: Optional[int] = None,
**kwargs,
) -> AsyncGenerator[ChatCompletionChunk, None]:
"""
流式聊天完成
Args:
messages: 消息列表
model: 模型名称
temperature: 温度参数
max_tokens: 最大token数
**kwargs: 其他参数
Yields:
ChatCompletionChunk: 流式响应块
"""
# 转换消息格式
formatted_messages = []
for msg in messages:
if isinstance(msg, ChatMessage):
formatted_messages.append(msg.to_dict())
else:
formatted_messages.append(msg)
extra_body = {}
if kwargs.get("enable_thinking") is not None:
extra_body["enable_thinking"] = kwargs.pop("enable_thinking")
if kwargs.get("thinking_budget") is not None:
extra_body["thinking_budget"] = kwargs.pop("thinking_budget")
stream = await self.client.chat.completions.create(
model=model,
messages=formatted_messages,
temperature=temperature,
max_tokens=max_tokens,
stream=True,
extra_body=extra_body if extra_body else None,
**kwargs,
)
async for chunk in stream:
yield chunk
async def get_stream_content(
self,
messages: List[Union[ChatMessage, Dict[str, str]]],
model: str = "gpt-3.5-turbo",
temperature: float = 0.7,
max_tokens: Optional[int] = None,
**kwargs,
) -> AsyncGenerator[str, None]:
"""
获取流式内容(只返回文本内容)
Args:
messages: 消息列表
model: 模型名称
temperature: 温度参数
max_tokens: 最大token数
**kwargs: 其他参数
Yields:
str: 文本内容片段
"""
async for chunk in self.chat_completion_stream(
messages=messages, model=model, temperature=temperature, max_tokens=max_tokens, **kwargs
):
if chunk.choices and chunk.choices[0].delta.content:
yield chunk.choices[0].delta.content
async def collect_stream_response(
self,
messages: List[Union[ChatMessage, Dict[str, str]]],
model: str = "gpt-3.5-turbo",
temperature: float = 0.7,
max_tokens: Optional[int] = None,
**kwargs,
) -> str:
"""
收集完整的流式响应
Args:
messages: 消息列表
model: 模型名称
temperature: 温度参数
max_tokens: 最大token数
**kwargs: 其他参数
Returns:
str: 完整的响应文本
"""
full_response = ""
async for content in self.get_stream_content(
messages=messages, model=model, temperature=temperature, max_tokens=max_tokens, **kwargs
):
full_response += content
return full_response
async def close(self):
"""关闭客户端"""
await self.client.close()
async def __aenter__(self):
"""异步上下文管理器入口"""
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""异步上下文管理器退出"""
await self.close()
class ConversationManager:
"""对话管理器,用于管理对话历史"""
def __init__(self, client: AsyncOpenAIClient, system_prompt: Optional[str] = None):
"""
初始化对话管理器
Args:
client: OpenAI客户端实例
system_prompt: 系统提示词
"""
self.client = client
self.messages: List[ChatMessage] = []
if system_prompt:
self.messages.append(ChatMessage(role="system", content=system_prompt))
def add_user_message(self, content: str):
"""添加用户消息"""
self.messages.append(ChatMessage(role="user", content=content))
def add_assistant_message(self, content: str):
"""添加助手消息"""
self.messages.append(ChatMessage(role="assistant", content=content))
async def send_message_stream(
self, content: str, model: str = "gpt-3.5-turbo", **kwargs
) -> AsyncGenerator[str, None]:
"""
发送消息并获取流式响应
Args:
content: 用户消息内容
model: 模型名称
**kwargs: 其他参数
Yields:
str: 响应内容片段
"""
self.add_user_message(content)
response_content = ""
async for chunk in self.client.get_stream_content(messages=self.messages, model=model, **kwargs):
response_content += chunk
yield chunk
self.add_assistant_message(response_content)
async def send_message(self, content: str, model: str = "gpt-3.5-turbo", **kwargs) -> str:
"""
发送消息并获取完整响应
Args:
content: 用户消息内容
model: 模型名称
**kwargs: 其他参数
Returns:
str: 完整响应
"""
self.add_user_message(content)
response = await self.client.chat_completion(messages=self.messages, model=model, **kwargs)
response_content = response.choices[0].message.content
self.add_assistant_message(response_content)
return response_content
def clear_history(self, keep_system: bool = True):
"""
清除对话历史
Args:
keep_system: 是否保留系统消息
"""
if keep_system and self.messages and self.messages[0].role == "system":
self.messages = [self.messages[0]]
else:
self.messages = []
def get_message_count(self) -> int:
"""获取消息数量"""
return len(self.messages)
def get_conversation_history(self) -> List[Dict[str, str]]:
"""获取对话历史"""
return [msg.to_dict() for msg in self.messages]
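# Usage sketch (illustrative; assumes an OpenAI-compatible endpoint and the
# OPENAI_API_KEY environment variable — both hypothetical here):
#
#   import asyncio
#   import os
#
#   async def _demo_conversation():
#       async with AsyncOpenAIClient(api_key=os.environ["OPENAI_API_KEY"]) as client:
#           manager = ConversationManager(client, system_prompt="你是一个简洁的助手。")
#           async for piece in manager.send_message_stream("用一句话介绍你自己"):
#               print(piece, end="", flush=True)
#           print()
#           assert manager.get_message_count() == 3  # system + user + assistant
#
#   asyncio.run(_demo_conversation())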

View File

@@ -23,7 +23,7 @@ def init_real_time_info_prompts():
 {name_block}
 现在,你想要回复{person_name}的消息,消息内容是:{target_message}。请根据聊天记录和你要回复的消息,从你对{person_name}的了解中提取有关的信息:
-1.你需要提供你想要提取的信息具体是哪方面的信息,例如:年龄,性别,对ta的印象,最近发生的事等等。
+1.你需要提供你想要提取的信息具体是哪方面的信息,例如:年龄,性别,你们之间的交流方式,最近发生的事等等。
 2.请注意,请不要重复调取相同的信息,已经调取的信息如下:
 {info_cache_block}
 3.如果当前聊天记录中没有需要查询的信息,或者现有信息已经足够回复,请返回{{"none": "不需要查询"}}
@@ -71,13 +71,13 @@ class RelationshipFetcher:
         # LLM模型配置
         self.llm_model = LLMRequest(
             model=global_config.model.relation,
-            request_type="focus.real_time_info",
+            request_type="relation",
         )
         # 小模型用于即时信息提取
         self.instant_llm_model = LLMRequest(
             model=global_config.model.utils_small,
-            request_type="focus.real_time_info.instant",
+            request_type="relation.instant",
         )
         name = get_chat_manager().get_stream_name(self.chat_id)

View File

@@ -8,6 +8,7 @@
     success, reply_set = await generator_api.generate_reply(chat_stream, action_data, reasoning)
 """
+import traceback
 from typing import Tuple, Any, Dict, List, Optional
 from src.common.logger import get_logger
 from src.chat.replyer.default_generator import DefaultReplyer
@@ -26,6 +27,7 @@ logger = get_logger("generator_api")
 def get_replyer(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
+    enable_tool: bool = False,
     model_configs: Optional[List[Dict[str, Any]]] = None,
     request_type: str = "replyer",
 ) -> Optional[DefaultReplyer]:
@@ -46,10 +48,15 @@ def get_replyer(
     try:
         logger.debug(f"[GeneratorAPI] 正在获取回复器chat_id: {chat_id}, chat_stream: {'' if chat_stream else ''}")
         return replyer_manager.get_replyer(
-            chat_stream=chat_stream, chat_id=chat_id, model_configs=model_configs, request_type=request_type
+            chat_stream=chat_stream,
+            chat_id=chat_id,
+            model_configs=model_configs,
+            request_type=request_type,
+            enable_tool=enable_tool,
         )
     except Exception as e:
         logger.error(f"[GeneratorAPI] 获取回复器时发生意外错误: {e}", exc_info=True)
+        traceback.print_exc()
         return None
@@ -64,9 +71,9 @@ async def generate_reply(
     action_data: Dict[str, Any] = None,
     reply_to: str = "",
     relation_info: str = "",
-    structured_info: str = "",
     extra_info: str = "",
     available_actions: List[str] = None,
+    enable_tool: bool = False,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
     return_prompt: bool = False,
@@ -87,7 +94,9 @@
     """
     try:
         # 获取回复器
-        replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type)
+        replyer = get_replyer(
+            chat_stream, chat_id, model_configs=model_configs, request_type=request_type, enable_tool=enable_tool
+        )
         if not replyer:
             logger.error("[GeneratorAPI] 无法获取回复器")
             return False, []
@@ -99,7 +108,6 @@
         reply_data=action_data or {},
         reply_to=reply_to,
         relation_info=relation_info,
-        structured_info=structured_info,
         extra_info=extra_info,
         available_actions=available_actions,
     )

View File

@@ -22,6 +22,7 @@
 import traceback
 import time
 import difflib
+import re
 from typing import Optional, Union
 from src.common.logger import get_logger
@@ -171,7 +172,41 @@ async def _find_reply_message(target_stream, reply_to: str) -> Optional[MessageR
             person_id = get_person_info_manager().get_person_id(platform, user_id)
             person_name = await get_person_info_manager().get_value(person_id, "person_name")
             if person_name == sender:
-                similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
+                translate_text = message["processed_plain_text"]
+                # 检查是否有 回复<aaa:bbb> 字段
+                reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
+                match = re.search(reply_pattern, translate_text)
+                if match:
+                    aaa = match.group(1)
+                    bbb = match.group(2)
+                    reply_person_id = get_person_info_manager().get_person_id(platform, bbb)
+                    reply_person_name = await get_person_info_manager().get_value(reply_person_id, "person_name")
+                    if not reply_person_name:
+                        reply_person_name = aaa
+                    # 在内容前加上回复信息
+                    translate_text = re.sub(reply_pattern, f"回复 {reply_person_name}", translate_text, count=1)
+                # 检查是否有 @<aaa:bbb> 字段
+                at_pattern = r"@<([^:<>]+):([^:<>]+)>"
+                at_matches = list(re.finditer(at_pattern, translate_text))
+                if at_matches:
+                    new_content = ""
+                    last_end = 0
+                    for m in at_matches:
+                        new_content += translate_text[last_end : m.start()]
+                        aaa = m.group(1)
+                        bbb = m.group(2)
+                        at_person_id = get_person_info_manager().get_person_id(platform, bbb)
+                        at_person_name = await get_person_info_manager().get_value(at_person_id, "person_name")
+                        if not at_person_name:
+                            at_person_name = aaa
+                        new_content += f"@{at_person_name}"
+                        last_end = m.end()
+                    new_content += translate_text[last_end:]
+                    translate_text = new_content
+                similarity = difflib.SequenceMatcher(None, text, translate_text).ratio()
                 if similarity >= 0.9:
                     find_msg = message
                     break
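A worked example of the two substitutions added above (illustrative; the async person-name lookups are replaced with fixed names):

import re

text = "回复<张三:10001> @<李四:10002> 明天见"
# first pass: collapse the 回复<name:id> tag into "回复 <resolved name>"
text = re.sub(r"回复<([^:<>]+):([^:<>]+)>", "回复 张三", text, count=1)  # lookup mocked
# second pass: rewrite each @<name:id> mention to @<resolved name>
text = re.sub(r"@<([^:<>]+):([^:<>]+)>", lambda m: f"@{m.group(1)}", text)
print(text)  # -> 回复 张三 @李四 明天见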

View File

@@ -17,9 +17,27 @@ logger = get_logger("manifest_utils")
 class VersionComparator:
     """版本号比较器
-    支持语义化版本号比较自动处理snapshot版本
+    支持语义化版本号比较自动处理snapshot版本,并支持向前兼容性检查
     """
+    # 版本兼容性映射表(硬编码)
+    # 格式: {插件最大支持版本: [实际兼容的版本列表]}
+    COMPATIBILITY_MAP = {
+        # 0.8.x 系列向前兼容规则
+        "0.8.0": ["0.8.1", "0.8.2", "0.8.3", "0.8.4", "0.8.5", "0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.1": ["0.8.2", "0.8.3", "0.8.4", "0.8.5", "0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.2": ["0.8.3", "0.8.4", "0.8.5", "0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.3": ["0.8.4", "0.8.5", "0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.4": ["0.8.5", "0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.5": ["0.8.6", "0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.6": ["0.8.7", "0.8.8", "0.8.9", "0.8.10"],
+        "0.8.7": ["0.8.8", "0.8.9", "0.8.10"],
+        "0.8.8": ["0.8.9", "0.8.10"],
+        "0.8.9": ["0.8.10"],
+        # 可以根据需要添加更多兼容映射
+        # "0.9.0": ["0.9.1", "0.9.2", "0.9.3"],  # 示例0.9.x系列兼容
+    }
     @staticmethod
     def normalize_version(version: str) -> str:
         """标准化版本号移除snapshot标识
@@ -88,9 +106,31 @@
         else:
             return 0
+    @staticmethod
+    def check_forward_compatibility(current_version: str, max_version: str) -> Tuple[bool, str]:
+        """检查向前兼容性(仅使用兼容性映射表)
+        Args:
+            current_version: 当前版本
+            max_version: 插件声明的最大支持版本
+        Returns:
+            Tuple[bool, str]: (是否兼容, 兼容信息)
+        """
+        current_normalized = VersionComparator.normalize_version(current_version)
+        max_normalized = VersionComparator.normalize_version(max_version)
+        # 检查兼容性映射表
+        if max_normalized in VersionComparator.COMPATIBILITY_MAP:
+            compatible_versions = VersionComparator.COMPATIBILITY_MAP[max_normalized]
+            if current_normalized in compatible_versions:
+                return True, f"根据兼容性映射表,版本 {current_normalized} 与 {max_normalized} 兼容"
+        return False, ""
     @staticmethod
     def is_version_in_range(version: str, min_version: str = "", max_version: str = "") -> Tuple[bool, str]:
-        """检查版本是否在指定范围内
+        """检查版本是否在指定范围内,支持兼容性检查
         Args:
             version: 要检查的版本号
@@ -98,7 +138,7 @@
             max_version: 最大版本号(可选)
         Returns:
-            Tuple[bool, str]: (是否兼容, 错误信息)
+            Tuple[bool, str]: (是否兼容, 错误信息或兼容信息)
         """
         if not min_version and not max_version:
             return True, ""
@@ -114,8 +154,19 @@
         # 检查最大版本
         if max_version:
             max_normalized = VersionComparator.normalize_version(max_version)
-            if VersionComparator.compare_versions(version_normalized, max_normalized) > 0:
-                return False, f"版本 {version_normalized} 高于最大支持版本 {max_normalized}"
+            comparison = VersionComparator.compare_versions(version_normalized, max_normalized)
+            if comparison > 0:
+                # 严格版本检查失败,尝试兼容性检查
+                is_compatible, compat_msg = VersionComparator.check_forward_compatibility(
+                    version_normalized, max_normalized
+                )
+                if is_compatible:
+                    logger.info(f"版本兼容性检查:{compat_msg}")
+                    return True, compat_msg
+                else:
+                    return False, f"版本 {version_normalized} 高于最大支持版本 {max_normalized},且无兼容性映射"
         return True, ""
@@ -128,6 +179,29 @@
     """
     return VersionComparator.normalize_version(MMC_VERSION)
+    @staticmethod
+    def add_compatibility_mapping(base_version: str, compatible_versions: list) -> None:
+        """动态添加兼容性映射
+        Args:
+            base_version: 基础版本(插件声明的最大支持版本)
+            compatible_versions: 兼容的版本列表
+        """
+        base_normalized = VersionComparator.normalize_version(base_version)
+        VersionComparator.COMPATIBILITY_MAP[base_normalized] = [
+            VersionComparator.normalize_version(v) for v in compatible_versions
+        ]
+        logger.info(f"添加兼容性映射:{base_normalized} -> {compatible_versions}")
+    @staticmethod
+    def get_compatibility_info() -> Dict[str, list]:
+        """获取当前的兼容性映射表
+        Returns:
+            Dict[str, list]: 兼容性映射表的副本
+        """
+        return VersionComparator.COMPATIBILITY_MAP.copy()
 class ManifestValidator:
     """Manifest文件验证器"""
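A quick illustration of the fallback path added above, using the hardcoded map:

ok, msg = VersionComparator.is_version_in_range("0.8.10", min_version="0.8.0", max_version="0.8.0")
# The strict check fails (0.8.10 > 0.8.0), but COMPATIBILITY_MAP["0.8.0"]
# contains "0.8.10", so check_forward_compatibility returns True and the
# plugin is accepted with a compatibility note instead of being rejected.
assert ok is True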
"""Manifest文件验证器""" """Manifest文件验证器"""

View File

@@ -10,8 +10,7 @@
"license": "GPL-v3.0-or-later", "license": "GPL-v3.0-or-later",
"host_application": { "host_application": {
"min_version": "0.8.0", "min_version": "0.8.0"
"max_version": "0.8.0"
}, },
"homepage_url": "https://github.com/MaiM-with-u/maibot", "homepage_url": "https://github.com/MaiM-with-u/maibot",
"repository_url": "https://github.com/MaiM-with-u/maibot", "repository_url": "https://github.com/MaiM-with-u/maibot",

View File

@@ -0,0 +1,84 @@
from typing import Tuple
# 导入新插件系统
from src.plugin_system import BaseAction, ActionActivationType, ChatMode
# 导入依赖的系统组件
from src.common.logger import get_logger
# 导入API模块 - 标准Python包方式
from src.plugin_system.apis import emoji_api
from src.plugins.built_in.core_actions.no_reply import NoReplyAction
logger = get_logger("core_actions")
class EmojiAction(BaseAction):
"""表情动作 - 发送表情包"""
# 激活设置
focus_activation_type = ActionActivationType.LLM_JUDGE
normal_activation_type = ActionActivationType.RANDOM
mode_enable = ChatMode.ALL
parallel_action = True
random_activation_probability = 0.2 # 默认值,可通过配置覆盖
# 动作基本信息
action_name = "emoji"
action_description = "发送表情包辅助表达情绪"
# LLM判断提示词
llm_judge_prompt = """
判定是否需要使用表情动作的条件:
1. 用户明确要求使用表情包
2. 这是一个适合表达强烈情绪的场合
    3. 不要发送太多表情包,如果你已经发送过多个表情包则回答"否"
    请回答"是"或"否"
"""
# 动作参数定义
action_parameters = {"description": "文字描述你想要发送的表情包内容"}
# 动作使用场景
action_require = [
"发送表情包辅助表达情绪",
"表达情绪时可以选择使用",
"不要连续发送,如果你已经发过[表情包],就不要选择此动作",
]
# 关联类型
associated_types = ["emoji"]
async def execute(self) -> Tuple[bool, str]:
"""执行表情动作"""
logger.info(f"{self.log_prefix} 决定发送表情")
try:
# 1. 根据描述选择表情包
description = self.action_data.get("description", "")
emoji_result = await emoji_api.get_by_description(description)
if not emoji_result:
logger.warning(f"{self.log_prefix} 未找到匹配描述 '{description}' 的表情包")
return False, f"未找到匹配 '{description}' 的表情包"
emoji_base64, emoji_description, matched_emotion = emoji_result
logger.info(f"{self.log_prefix} 找到表情包: {emoji_description}, 匹配情感: {matched_emotion}")
# 使用BaseAction的便捷方法发送表情包
success = await self.send_emoji(emoji_base64)
if not success:
logger.error(f"{self.log_prefix} 表情包发送失败")
return False, "表情包发送失败"
# 重置NoReplyAction的连续计数器
NoReplyAction.reset_consecutive_count()
return True, f"发送表情包: {emoji_description}"
except Exception as e:
logger.error(f"{self.log_prefix} 表情动作执行失败: {e}")
return False, f"表情发送失败: {str(e)}"

View File

@@ -18,8 +18,9 @@ from src.config.config import global_config
 from src.common.logger import get_logger
 # 导入API模块 - 标准Python包方式
-from src.plugin_system.apis import emoji_api, generator_api, message_api
+from src.plugin_system.apis import generator_api, message_api
 from src.plugins.built_in.core_actions.no_reply import NoReplyAction
+from src.plugins.built_in.core_actions.emoji import EmojiAction
 logger = get_logger("core_actions")
@@ -63,6 +64,7 @@ class ReplyAction(BaseAction):
             action_data=self.action_data,
             chat_id=self.chat_id,
             request_type="focus.replyer",
+            enable_tool=global_config.tool.enable_in_focus_chat,
         )
         # 检查从start_time以来的新消息数量
@@ -111,72 +113,6 @@ class ReplyAction(BaseAction):
             return False, f"回复失败: {str(e)}"
-class EmojiAction(BaseAction):
-    """表情动作 - 发送表情包"""
-    # 激活设置
-    focus_activation_type = ActionActivationType.LLM_JUDGE
-    normal_activation_type = ActionActivationType.RANDOM
-    mode_enable = ChatMode.ALL
-    parallel_action = True
-    random_activation_probability = 0.2  # 默认值,可通过配置覆盖
-    # 动作基本信息
-    action_name = "emoji"
-    action_description = "发送表情包辅助表达情绪"
-    # LLM判断提示词
-    llm_judge_prompt = """
-    判定是否需要使用表情动作的条件:
-    1. 用户明确要求使用表情包
-    2. 这是一个适合表达强烈情绪的场合
-    3. 不要发送太多表情包,如果你已经发送过多个表情包则回答"否"
-    请回答"是"或"否"
-    """
-    # 动作参数定义
-    action_parameters = {"description": "文字描述你想要发送的表情包内容"}
-    # 动作使用场景
-    action_require = ["表达情绪时可以选择使用", "重点:不要连续发,如果你已经发过[表情包],就不要选择此动作"]
-    # 关联类型
-    associated_types = ["emoji"]
-    async def execute(self) -> Tuple[bool, str]:
-        """执行表情动作"""
-        logger.info(f"{self.log_prefix} 决定发送表情")
-        try:
-            # 1. 根据描述选择表情包
-            description = self.action_data.get("description", "")
-            emoji_result = await emoji_api.get_by_description(description)
-            if not emoji_result:
-                logger.warning(f"{self.log_prefix} 未找到匹配描述 '{description}' 的表情包")
-                return False, f"未找到匹配 '{description}' 的表情包"
-            emoji_base64, emoji_description, matched_emotion = emoji_result
-            logger.info(f"{self.log_prefix} 找到表情包: {emoji_description}, 匹配情感: {matched_emotion}")
-            # 使用BaseAction的便捷方法发送表情包
-            success = await self.send_emoji(emoji_base64)
-            if not success:
-                logger.error(f"{self.log_prefix} 表情包发送失败")
-                return False, "表情包发送失败"
-            # 重置NoReplyAction的连续计数器
-            NoReplyAction.reset_consecutive_count()
-            return True, f"发送表情包: {emoji_description}"
-        except Exception as e:
-            logger.error(f"{self.log_prefix} 表情动作执行失败: {e}")
-            return False, f"表情发送失败: {str(e)}"
 @register_plugin
 class CoreActionsPlugin(BasePlugin):
     """核心动作插件
@@ -205,14 +141,12 @@ class CoreActionsPlugin(BasePlugin):
     config_schema = {
         "plugin": {
             "enabled": ConfigField(type=bool, default=True, description="是否启用插件"),
-            "config_version": ConfigField(type=str, default="0.1.0", description="配置文件版本"),
+            "config_version": ConfigField(type=str, default="0.3.1", description="配置文件版本"),
         },
         "components": {
             "enable_reply": ConfigField(type=bool, default=True, description="是否启用'回复'动作"),
             "enable_no_reply": ConfigField(type=bool, default=True, description="是否启用'不回复'动作"),
             "enable_emoji": ConfigField(type=bool, default=True, description="是否启用'表情'动作"),
-            "enable_change_to_focus": ConfigField(type=bool, default=True, description="是否启用'切换到专注模式'动作"),
-            "enable_exit_focus": ConfigField(type=bool, default=True, description="是否启用'退出专注模式'动作"),
         },
         "no_reply": {
             "max_timeout": ConfigField(type=int, default=1200, description="最大等待超时时间(秒)"),

View File

@@ -11,7 +11,7 @@
"host_application": { "host_application": {
"min_version": "0.8.0", "min_version": "0.8.0",
"max_version": "0.8.0" "max_version": "0.8.10"
}, },
"homepage_url": "https://github.com/MaiM-with-u/maibot", "homepage_url": "https://github.com/MaiM-with-u/maibot",
"repository_url": "https://github.com/MaiM-with-u/maibot", "repository_url": "https://github.com/MaiM-with-u/maibot",

View File

@@ -10,7 +10,7 @@
"license": "GPL-v3.0-or-later", "license": "GPL-v3.0-or-later",
"host_application": { "host_application": {
"min_version": "0.8.0", "min_version": "0.8.0",
"max_version": "0.8.0" "max_version": "0.8.10"
}, },
"keywords": ["mute", "ban", "moderation", "admin", "management", "group"], "keywords": ["mute", "ban", "moderation", "admin", "management", "group"],
"categories": ["Moderation", "Group Management", "Admin Tools"], "categories": ["Moderation", "Group Management", "Admin Tools"],

View File

@@ -11,7 +11,7 @@
"host_application": { "host_application": {
"min_version": "0.8.0", "min_version": "0.8.0",
"max_version": "0.8.0" "max_version": "0.8.10"
}, },
"homepage_url": "https://github.com/MaiM-with-u/maibot", "homepage_url": "https://github.com/MaiM-with-u/maibot",
"repository_url": "https://github.com/MaiM-with-u/maibot", "repository_url": "https://github.com/MaiM-with-u/maibot",

View File

@@ -10,7 +10,7 @@
"license": "GPL-v3.0-or-later", "license": "GPL-v3.0-or-later",
"host_application": { "host_application": {
"min_version": "0.8.0", "min_version": "0.8.0",
"max_version": "0.8.0" "max_version": "0.8.10"
}, },
"keywords": ["vtb", "vtuber", "emotion", "expression", "virtual", "streamer"], "keywords": ["vtb", "vtuber", "emotion", "expression", "virtual", "streamer"],
"categories": ["Entertainment", "Virtual Assistant", "Emotion"], "categories": ["Entertainment", "Virtual Assistant", "Emotion"],

View File

@@ -1,4 +1,4 @@
-from src.tools.tool_can_use.base_tool import BaseTool, register_tool
+from src.tools.tool_can_use.base_tool import BaseTool
 from src.person_info.person_info import get_person_info_manager
 from src.common.logger import get_logger
 import time
@@ -102,7 +102,3 @@ class RenamePersonTool(BaseTool):
             error_msg = f"重命名失败: {str(e)}"
             logger.error(error_msg, exc_info=True)
             return {"type": "info_error", "id": f"rename_error_{time.time()}", "content": error_msg}
-# 注册工具
-register_tool(RenamePersonTool)

404
src/tools/tool_executor.py Normal file
View File

@@ -0,0 +1,404 @@
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
import time
from src.common.logger import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from typing import List, Dict, Tuple, Optional
logger = get_logger("tool_executor")
def init_tool_executor_prompt():
"""初始化工具执行器的提示词"""
tool_executor_prompt = """
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}
群里正在进行的聊天内容:
{chat_history}
现在,{sender}发送了内容:{target_message},你想要回复ta。
请仔细分析聊天内容,考虑以下几点:
1. 内容中是否包含需要查询信息的问题
2. 是否有明确的工具使用指令
If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed".
"""
Prompt(tool_executor_prompt, "tool_executor_prompt")
class ToolExecutor:
"""独立的工具执行器组件
可以直接输入聊天消息内容,自动判断并执行相应的工具,返回结构化的工具执行结果。
"""
    def __init__(self, chat_id: Optional[str] = None, enable_cache: bool = True, cache_ttl: int = 3):
        """初始化工具执行器
        Args:
            chat_id: 聊天流标识符,用于日志记录
enable_cache: 是否启用缓存机制
cache_ttl: 缓存生存时间(周期数)
"""
self.chat_id = chat_id
self.log_prefix = f"[ToolExecutor:{self.chat_id}] "
self.llm_model = LLMRequest(
model=global_config.model.tool_use,
request_type="tool_executor",
)
# 初始化工具实例
self.tool_instance = ToolUser()
# 缓存配置
self.enable_cache = enable_cache
self.cache_ttl = cache_ttl
self.tool_cache = {} # 格式: {cache_key: {"result": result, "ttl": ttl, "timestamp": timestamp}}
logger.info(f"{self.log_prefix}工具执行器初始化完成,缓存{'启用' if enable_cache else '禁用'}TTL={cache_ttl}")
async def execute_from_chat_message(
self, target_message: str, chat_history: list[str], sender: str, return_details: bool = False
) -> List[Dict] | Tuple[List[Dict], List[str], str]:
"""从聊天消息执行工具
Args:
target_message: 目标消息内容
chat_history: 聊天历史
sender: 发送者
return_details: 是否返回详细信息(使用的工具列表和提示词)
Returns:
如果return_details为False: List[Dict] - 工具执行结果列表
如果return_details为True: Tuple[List[Dict], List[str], str] - (结果列表, 使用的工具, 提示词)
"""
# 首先检查缓存
cache_key = self._generate_cache_key(target_message, chat_history, sender)
cached_result = self._get_from_cache(cache_key)
if cached_result:
logger.info(f"{self.log_prefix}使用缓存结果,跳过工具执行")
if return_details:
# 从缓存结果中提取工具名称
used_tools = [result.get("tool_name", "unknown") for result in cached_result]
return cached_result, used_tools, "使用缓存结果"
else:
return cached_result
# 缓存未命中,执行工具调用
# 获取可用工具
tools = self.tool_instance._define_tools()
# 获取当前时间
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
bot_name = global_config.bot.nickname
# 构建工具调用提示词
prompt = await global_prompt_manager.format_prompt(
"tool_executor_prompt",
target_message=target_message,
chat_history=chat_history,
sender=sender,
bot_name=bot_name,
time_now=time_now,
)
logger.debug(f"{self.log_prefix}开始LLM工具调用分析")
# 调用LLM进行工具决策
response, other_info = await self.llm_model.generate_response_async(prompt=prompt, tools=tools)
# 解析LLM响应
if len(other_info) == 3:
reasoning_content, model_name, tool_calls = other_info
else:
reasoning_content, model_name = other_info
tool_calls = None
# 执行工具调用
tool_results, used_tools = await self._execute_tool_calls(tool_calls)
# 缓存结果
if tool_results:
self._set_cache(cache_key, tool_results)
logger.info(f"{self.log_prefix}工具执行完成,共执行{len(used_tools)}个工具: {used_tools}")
if return_details:
return tool_results, used_tools, prompt
else:
return tool_results
async def _execute_tool_calls(self, tool_calls) -> Tuple[List[Dict], List[str]]:
"""执行工具调用
Args:
tool_calls: LLM返回的工具调用列表
Returns:
Tuple[List[Dict], List[str]]: (工具执行结果列表, 使用的工具名称列表)
"""
tool_results = []
used_tools = []
if not tool_calls:
logger.debug(f"{self.log_prefix}无需执行工具")
return tool_results, used_tools
logger.info(f"{self.log_prefix}开始执行工具调用: {tool_calls}")
# 处理工具调用
success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
if not success:
logger.error(f"{self.log_prefix}工具调用解析失败: {error_msg}")
return tool_results, used_tools
if not valid_tool_calls:
logger.debug(f"{self.log_prefix}无有效工具调用")
return tool_results, used_tools
# 执行每个工具调用
for tool_call in valid_tool_calls:
try:
tool_name = tool_call.get("name", "unknown_tool")
used_tools.append(tool_name)
logger.debug(f"{self.log_prefix}执行工具: {tool_name}")
# 执行工具
result = await self.tool_instance._execute_tool_call(tool_call)
if result:
tool_info = {
"type": result.get("type", "unknown_type"),
"id": result.get("id", f"tool_exec_{time.time()}"),
"content": result.get("content", ""),
"tool_name": tool_name,
"timestamp": time.time(),
}
tool_results.append(tool_info)
logger.info(f"{self.log_prefix}工具{tool_name}执行成功,类型: {tool_info['type']}")
logger.debug(f"{self.log_prefix}工具{tool_name}结果内容: {tool_info['content'][:200]}...")
except Exception as e:
logger.error(f"{self.log_prefix}工具{tool_name}执行失败: {e}")
# 添加错误信息到结果中
error_info = {
"type": "tool_error",
"id": f"tool_error_{time.time()}",
"content": f"工具{tool_name}执行失败: {str(e)}",
"tool_name": tool_name,
"timestamp": time.time(),
}
tool_results.append(error_info)
return tool_results, used_tools
def _generate_cache_key(self, target_message: str, chat_history: list[str], sender: str) -> str:
"""生成缓存键
Args:
target_message: 目标消息内容
chat_history: 聊天历史
sender: 发送者
Returns:
str: 缓存键
"""
import hashlib
# 使用消息内容和群聊状态生成唯一缓存键
content = f"{target_message}_{chat_history}_{sender}"
return hashlib.md5(content.encode()).hexdigest()
def _get_from_cache(self, cache_key: str) -> Optional[List[Dict]]:
"""从缓存获取结果
Args:
cache_key: 缓存键
Returns:
Optional[List[Dict]]: 缓存的结果如果不存在或过期则返回None
"""
if not self.enable_cache or cache_key not in self.tool_cache:
return None
cache_item = self.tool_cache[cache_key]
if cache_item["ttl"] <= 0:
# 缓存过期,删除
del self.tool_cache[cache_key]
logger.debug(f"{self.log_prefix}缓存过期,删除缓存键: {cache_key}")
return None
# 减少TTL
cache_item["ttl"] -= 1
logger.debug(f"{self.log_prefix}使用缓存结果剩余TTL: {cache_item['ttl']}")
return cache_item["result"]
def _set_cache(self, cache_key: str, result: List[Dict]):
"""设置缓存
Args:
cache_key: 缓存键
result: 要缓存的结果
"""
if not self.enable_cache:
return
self.tool_cache[cache_key] = {"result": result, "ttl": self.cache_ttl, "timestamp": time.time()}
logger.debug(f"{self.log_prefix}设置缓存TTL: {self.cache_ttl}")
def _cleanup_expired_cache(self):
"""清理过期的缓存"""
if not self.enable_cache:
return
expired_keys = []
for cache_key, cache_item in self.tool_cache.items():
if cache_item["ttl"] <= 0:
expired_keys.append(cache_key)
for key in expired_keys:
del self.tool_cache[key]
if expired_keys:
logger.debug(f"{self.log_prefix}清理了{len(expired_keys)}个过期缓存")
def get_available_tools(self) -> List[str]:
"""获取可用工具列表
Returns:
List[str]: 可用工具名称列表
"""
tools = self.tool_instance._define_tools()
return [tool.get("function", {}).get("name", "unknown") for tool in tools]
async def execute_specific_tool(
self, tool_name: str, tool_args: Dict, validate_args: bool = True
) -> Optional[Dict]:
"""直接执行指定工具
Args:
tool_name: 工具名称
tool_args: 工具参数
validate_args: 是否验证参数
Returns:
Optional[Dict]: 工具执行结果失败时返回None
"""
try:
tool_call = {"name": tool_name, "arguments": tool_args}
logger.info(f"{self.log_prefix}直接执行工具: {tool_name}")
result = await self.tool_instance._execute_tool_call(tool_call)
if result:
tool_info = {
"type": result.get("type", "unknown_type"),
"id": result.get("id", f"direct_tool_{time.time()}"),
"content": result.get("content", ""),
"tool_name": tool_name,
"timestamp": time.time(),
}
logger.info(f"{self.log_prefix}直接工具执行成功: {tool_name}")
return tool_info
except Exception as e:
logger.error(f"{self.log_prefix}直接工具执行失败 {tool_name}: {e}")
return None
def clear_cache(self):
"""清空所有缓存"""
if self.enable_cache:
cache_count = len(self.tool_cache)
self.tool_cache.clear()
logger.info(f"{self.log_prefix}清空了{cache_count}个缓存项")
def get_cache_status(self) -> Dict:
"""获取缓存状态信息
Returns:
Dict: 包含缓存统计信息的字典
"""
if not self.enable_cache:
return {"enabled": False, "cache_count": 0}
# 清理过期缓存
self._cleanup_expired_cache()
total_count = len(self.tool_cache)
ttl_distribution = {}
for cache_item in self.tool_cache.values():
ttl = cache_item["ttl"]
ttl_distribution[ttl] = ttl_distribution.get(ttl, 0) + 1
return {
"enabled": True,
"cache_count": total_count,
"cache_ttl": self.cache_ttl,
"ttl_distribution": ttl_distribution,
}
def set_cache_config(self, enable_cache: bool = None, cache_ttl: int = None):
"""动态修改缓存配置
Args:
enable_cache: 是否启用缓存
cache_ttl: 缓存TTL
"""
if enable_cache is not None:
self.enable_cache = enable_cache
logger.info(f"{self.log_prefix}缓存状态修改为: {'启用' if enable_cache else '禁用'}")
if cache_ttl is not None and cache_ttl > 0:
self.cache_ttl = cache_ttl
logger.info(f"{self.log_prefix}缓存TTL修改为: {cache_ttl}")
# 初始化提示词
init_tool_executor_prompt()
"""
使用示例:
# 1. 基础使用 - 从聊天消息执行工具启用缓存默认TTL=3
executor = ToolExecutor(chat_id="my_chat")
results = await executor.execute_from_chat_message(
    target_message="今天天气怎么样?现在几点了?",
    chat_history=[],
    sender="用户A",
)
# 2. 禁用缓存的执行器
no_cache_executor = ToolExecutor(chat_id="no_cache", enable_cache=False)
# 3. 自定义缓存TTL
long_cache_executor = ToolExecutor(chat_id="long_cache", cache_ttl=10)
# 4. 获取详细信息
results, used_tools, prompt = await executor.execute_from_chat_message(
    target_message="帮我查询Python相关知识",
    chat_history=[],
    sender="用户A",
    return_details=True,
)
# 5. 直接执行特定工具
result = await executor.execute_specific_tool(
tool_name="get_knowledge",
tool_args={"query": "机器学习"}
)
# 6. 缓存管理
available_tools = executor.get_available_tools()
cache_status = executor.get_cache_status() # 查看缓存状态
executor.clear_cache() # 清空缓存
executor.set_cache_config(cache_ttl=5) # 动态修改缓存配置
"""

View File

@@ -1,5 +1,5 @@
 [inner]
-version = "2.29.0"
+version = "3.1.0"
 #----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
 #如果你想要修改配置文件请在修改后将version的值进行变更
@@ -44,6 +44,7 @@ compress_indentity = true # 是否压缩身份,压缩后会精简身份信息
 [expression]
 # 表达方式
+enable_expression = true # 是否启用表达方式
 expression_style = "描述麦麦说话的表达风格,表达习惯,例如:(请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景。)"
 enable_expression_learning = false # 是否启用表达学习,麦麦会学习不同群里人类说话风格(群之间不互通)
 learning_interval = 600 # 学习间隔 单位秒
@@ -66,6 +67,8 @@ chat_mode = "normal" # 聊天模式 —— 普通模式normal专注模式
 max_context_size = 18 # 上下文长度
+replyer_random_probability = 0.5 # 首要replyer模型被选择的概率
 talk_frequency = 1 # 麦麦回复频率,越高,麦麦回复越频繁
 time_based_talk_frequency = ["8:00,1", "12:00,1.5", "18:00,2", "01:00,0.5"]
@@ -113,7 +116,6 @@ ban_msgs_regex = [
 [normal_chat] #普通聊天
 #一般回复参数
-normal_chat_first_probability = 0.5 # 麦麦回答时选择首要模型的概率与之相对的次要模型的概率为1 - normal_chat_first_probability
 emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发
 thinking_timeout = 120 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢
@@ -133,11 +135,12 @@ think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
 consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高
 compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
 compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除
-[focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗
-tool_use_processor = false # 是否启用工具使用处理器
 working_memory_processor = false # 是否启用工作记忆处理器,消耗量大
+[tool]
+enable_in_normal_chat = false # 是否在普通聊天中启用工具
+enable_in_focus_chat = true # 是否在专注聊天中启用工具
 [emoji]
 max_reg_num = 60 # 表情包最大注册数量
 do_replace = true # 开启则在达到最大数量时删除(替换)表情包,关闭则达到最大数量时不会继续收集表情包
@@ -268,7 +271,7 @@ pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
 #默认temp 0.2 如果你使用的是老V3或者其他模型请自己修改temp参数
 temp = 0.2 #模型的温度新V3建议0.1-0.3
-[model.replyer_2] # 一般聊天模式的次要回复模型
+[model.replyer_2] # 次要回复模型
 name = "Pro/deepseek-ai/DeepSeek-R1"
 provider = "SILICONFLOW"
 pri_in = 4.0 #模型的输入价格(非必填,可以记录消耗)
@@ -305,6 +308,13 @@ pri_out = 2.8
 temp = 0.7
 enable_thinking = false # 是否启用思考
+[model.tool_use] #工具调用模型,需要使用支持工具调用的模型
+name = "Qwen/Qwen3-14B"
+provider = "SILICONFLOW"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false # 是否启用思考qwen3 only
 #嵌入模型
 [model.embedding]
@@ -324,15 +334,6 @@ pri_out = 2.8
 temp = 0.7
-[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型
-name = "Qwen/Qwen3-14B"
-provider = "SILICONFLOW"
-pri_in = 0.5
-pri_out = 2
-temp = 0.7
-enable_thinking = false # 是否启用思考qwen3 only
 #------------LPMM知识库模型------------
 [model.lpmm_entity_extract] # 实体提取模型