This commit is contained in:
tcmofashi
2025-06-02 21:09:53 +08:00
31 changed files with 1714 additions and 384 deletions

View File

@@ -1,5 +1,15 @@
# Changelog
## [0.7.1] -2025-6-2
- 修复关键词功能并且在focus中可用
- 更新planner架构,大大加快速度和表现效果,建议使用simple规划器
- 修复log出错问题
- 修复focus吞第一条消息问题
- 可关闭聊天规划处理器(建议关闭)
## [0.7.0] -2025-6-1
- 你可以选择normal,focus和auto多种不同的聊天方式。normal提供更少的消耗更快的回复速度。focus提供更好的聊天理解更多工具使用和插件能力
- 现在,你可以自定义麦麦的表达方式,并且麦麦也可以学习群友的聊天风格(需要在配置文件中打开)

View File

@@ -395,7 +395,7 @@ class DefaultExpressor:
thinking_start_time = time.time()
if thinking_start_time is None:
-logger.error(f"[{stream_name}]思考过程未找到或已结束,无法发送回复。")
+logger.error(f"[{stream_name}]expressor思考过程未找到或已结束,无法发送回复。")
return None
mark_head = False

View File

@@ -24,10 +24,11 @@ from src.chat.heart_flow.observation.structure_observation import StructureObser
from src.chat.heart_flow.observation.actions_observation import ActionObservation
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
+from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.focus_chat.memory_activator import MemoryActivator
from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
from src.chat.focus_chat.info_processors.self_processor import SelfProcessor
-from src.chat.focus_chat.planners.planner import ActionPlanner
+from src.chat.focus_chat.planners.planner_factory import PlannerFactory
from src.chat.focus_chat.planners.modify_actions import ActionModifier
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.working_memory.working_memory import WorkingMemory
@@ -119,8 +120,11 @@ class HeartFChatting:
self._register_default_processors()
self.expressor = DefaultExpressor(chat_id=self.stream_id)
+self.replyer = DefaultReplyer(chat_id=self.stream_id)
self.action_manager = ActionManager()
-self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager)
+self.action_planner = PlannerFactory.create_planner(
+log_prefix=self.log_prefix, action_manager=self.action_manager
+)
self.action_modifier = ActionModifier(action_manager=self.action_manager)
self.action_observation = ActionObservation(observe_id=self.stream_id)
@@ -167,8 +171,10 @@ class HeartFChatting:
try:
await self.expressor.initialize()
+await self.replyer.initialize()
self.chat_stream = await asyncio.to_thread(chat_manager.get_stream, self.stream_id)
self.expressor.chat_stream = self.chat_stream
+self.replyer.chat_stream = self.chat_stream
self.log_prefix = f"[{chat_manager.get_stream_name(self.stream_id) or self.stream_id}]"
except Exception as e:
logger.error(f"[HFC:{self.stream_id}] 初始化HFC时发生错误: {e}")
@@ -583,6 +589,7 @@ class HeartFChatting:
thinking_id=thinking_id,
observations=self.all_observations,
expressor=self.expressor,
+replyer=self.replyer,
chat_stream=self.chat_stream,
log_prefix=self.log_prefix,
shutting_down=self._shutting_down,

View File

@@ -108,9 +108,7 @@ class WorkingMemoryProcessor(BaseProcessor):
memory_summary = memory.summary
memory_id = memory.id
memory_brief = memory_summary.get("brief")
-# memory_detailed = memory_summary.get("detailed")
-memory_keypoints = memory_summary.get("keypoints")
-memory_events = memory_summary.get("events")
+memory_keypoints = memory_summary.get("key_points", [])
memory_single_prompt = f"记忆id:{memory_id},记忆摘要:{memory_brief}\n"
memory_prompts.append(memory_single_prompt)
@@ -165,15 +163,9 @@ class WorkingMemoryProcessor(BaseProcessor):
memory_summary = memory.summary
memory_id = memory.id
memory_brief = memory_summary.get("brief")
-# memory_detailed = memory_summary.get("detailed")
-memory_keypoints = memory_summary.get("keypoints")
-memory_events = memory_summary.get("events")
+memory_keypoints = memory_summary.get("key_points", [])
for keypoint in memory_keypoints:
memory_str += f"记忆要点:{keypoint}\n"
-for event in memory_events:
-memory_str += f"记忆事件:{event}\n"
-# memory_str += f"记忆摘要:{memory_detailed}\n"
-# memory_str += f"记忆主题:{memory_brief}\n"
working_memory_info = WorkingMemoryInfo()
if memory_str:
@@ -225,7 +217,7 @@ class WorkingMemoryProcessor(BaseProcessor):
logger.debug(f"{self.log_prefix} 异步合并记忆成功: {memory_id1}{memory_id2}...") logger.debug(f"{self.log_prefix} 异步合并记忆成功: {memory_id1}{memory_id2}...")
logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.summary.get('brief')}") logger.debug(f"{self.log_prefix} 合并后的记忆梗概: {merged_memory.summary.get('brief')}")
logger.debug(f"{self.log_prefix} 合并后的记忆详情: {merged_memory.summary.get('detailed')}") logger.debug(f"{self.log_prefix} 合并后的记忆详情: {merged_memory.summary.get('detailed')}")
logger.debug(f"{self.log_prefix} 合并后的记忆要点: {merged_memory.summary.get('keypoints')}") logger.debug(f"{self.log_prefix} 合并后的记忆要点: {merged_memory.summary.get('key_points')}")
logger.debug(f"{self.log_prefix} 合并后的记忆事件: {merged_memory.summary.get('events')}") logger.debug(f"{self.log_prefix} 合并后的记忆事件: {merged_memory.summary.get('events')}")
except Exception as e: except Exception as e:
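As a reading aid for the hunks above, here is a minimal sketch of the summary dictionary shape the processor now consumes after the separate `keypoints`/`events` fields were folded into a single `key_points` list. Only the field names come from this diff; the example values are invented:

```python
# Illustrative memory.summary after the key_points consolidation (values are made up).
summary = {
    "brief": "讨论了simple规划器的启用方式",
    "key_points": [
        "simple规划器通过配置项planner_type切换",
        "切换后规划速度明显变快",
    ],
}

# Mirrors the processor's loop: brief goes into the prompt, key_points become 记忆要点 lines.
memory_str = ""
for keypoint in summary.get("key_points", []):
    memory_str += f"记忆要点:{keypoint}\n"
```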

View File

@@ -118,6 +118,7 @@ class MemoryActivator:
# 只取response的第一个元素字符串
response_str = response[0]
+print(f"response_str: {response_str[1]}")
keywords = list(get_keywords_from_json(response_str))
# 更新关键词缓存

View File

@@ -1,6 +1,7 @@
from typing import Dict, List, Optional, Type, Any
from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY
from src.chat.heart_flow.observation.observation import Observation
+from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream
from src.common.logger_manager import get_logger
@@ -135,6 +136,7 @@ class ActionManager:
thinking_id: str,
observations: List[Observation],
expressor: DefaultExpressor,
+replyer: DefaultReplyer,
chat_stream: ChatStream,
log_prefix: str,
shutting_down: bool = False,
@@ -150,6 +152,7 @@ class ActionManager:
thinking_id: 思考ID
observations: 观察列表
expressor: 表达器
+replyer: 回复器
chat_stream: 聊天流
log_prefix: 日志前缀
shutting_down: 是否正在关闭
@@ -176,6 +179,7 @@ class ActionManager:
thinking_id=thinking_id,
observations=observations,
expressor=expressor,
+replyer=replyer,
chat_stream=chat_stream,
log_prefix=log_prefix,
shutting_down=shutting_down,

View File

@@ -2,5 +2,6 @@
from . import reply_action # noqa
from . import no_reply_action # noqa
from . import exit_focus_chat_action # noqa
+from . import emoji_action # noqa
# 在此处添加更多动作模块导入
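For context on what one of these imported modules provides, below is a minimal sketch of a hypothetical action module. The `ping_action`/`PingAction` name and behaviour are illustrative only; the `BaseAction`/`register_action` interface and the class attributes follow the actions shown elsewhere in this diff:

```python
# Hypothetical planners/actions/ping_action.py, imported here like the modules above.
from typing import Tuple

from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action


@register_action
class PingAction(BaseAction):
    """示例动作:不发送任何消息,仅返回成功,用于演示注册流程。"""

    action_name: str = "ping"
    action_description: str = "示例动作,仅用于演示如何注册"
    action_parameters: dict = {}
    action_require: list = ["仅作为示例,正常运行时不会被规划器选中"]
    associated_types: list = []
    default = False  # 不加入默认动作集

    def __init__(self, action_data: dict, reasoning: str, cycle_timers: dict, thinking_id: str, **kwargs):
        super().__init__(action_data, reasoning, cycle_timers, thinking_id)
        self.log_prefix = kwargs.get("log_prefix", "")

    async def handle_action(self) -> Tuple[bool, str]:
        # 真实动作会在这里构建并发送回复;示例直接返回成功与空文本
        return True, ""
```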

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from src.common.logger_manager import get_logger
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.focus_chat.hfc_utils import create_empty_anchor_message
logger = get_logger("action_taken")
@register_action
class EmojiAction(BaseAction):
"""表情动作处理类
处理构建和发送消息表情的动作。
"""
action_name: str = "emoji"
action_description: str = "当你想发送一个表情辅助你的回复表达"
action_parameters: dict[str:str] = {
"description": "文字描述你想要发送的表情",
}
action_require: list[str] = [
"你想要发送一个表情",
"表达情绪时可以选择使用",
"一般在你回复之后可以选择性使用"
]
associated_types: list[str] = ["emoji"]
default = True
def __init__(
self,
action_data: dict,
reasoning: str,
cycle_timers: dict,
thinking_id: str,
observations: List[Observation],
chat_stream: ChatStream,
log_prefix: str,
replyer: DefaultReplyer,
**kwargs,
):
"""初始化回复动作处理器
Args:
action_name: 动作名称
action_data: 动作数据,包含 message, emojis, target 等
reasoning: 执行该动作的理由
cycle_timers: 计时器字典
thinking_id: 思考ID
observations: 观察列表
replyer: 回复器
chat_stream: 聊天流
log_prefix: 日志前缀
"""
super().__init__(action_data, reasoning, cycle_timers, thinking_id)
self.observations = observations
self.replyer = replyer
self.chat_stream = chat_stream
self.log_prefix = log_prefix
async def handle_action(self) -> Tuple[bool, str]:
"""
处理回复动作
Returns:
Tuple[bool, str]: (是否执行成功, 回复文本)
"""
# 注意: 此处可能会使用不同的expressor实现根据任务类型切换不同的回复策略
return await self._handle_reply(
reasoning=self.reasoning,
reply_data=self.action_data,
cycle_timers=self.cycle_timers,
thinking_id=self.thinking_id,
)
async def _handle_reply(
self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str
) -> tuple[bool, str]:
"""
处理统一的回复动作 - 可包含文本和表情,顺序任意
reply_data格式:
{
"description": "描述你想要发送的表情"
}
"""
logger.info(f"{self.log_prefix} 决定发送表情")
# 从聊天观察获取锚定消息
# chatting_observation: ChattingObservation = next(
# obs for obs in self.observations if isinstance(obs, ChattingObservation)
# )
# if reply_data.get("target"):
# anchor_message = chatting_observation.search_message_by_text(reply_data["target"])
# else:
# anchor_message = None
# 如果没有找到锚点消息,创建一个占位符
# if not anchor_message:
# logger.info(f"{self.log_prefix} 未找到锚点消息,创建占位符")
# anchor_message = await create_empty_anchor_message(
# self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream
# )
# else:
# anchor_message.update_chat_stream(self.chat_stream)
logger.info(f"{self.log_prefix} 为了表情包创建占位符")
anchor_message = await create_empty_anchor_message(
self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream
)
success, reply_set = await self.replyer.deal_emoji(
cycle_timers=cycle_timers,
action_data=reply_data,
anchor_message=anchor_message,
# reasoning=reasoning,
thinking_id=thinking_id,
)
reply_text = ""
for reply in reply_set:
type = reply[0]
data = reply[1]
if type == "text":
reply_text += data
elif type == "emoji":
reply_text += data
return success, reply_text

View File

@@ -22,12 +22,11 @@ class NoReplyAction(BaseAction):
""" """
action_name = "no_reply" action_name = "no_reply"
action_description = "不回复" action_description = "暂时不回复消息"
action_parameters = {} action_parameters = {}
action_require = [ action_require = [
"话题无关/无聊/不感兴趣/不懂",
"聊天记录中最新一条消息是你自己发的且无人回应你",
"你连续发送了太多消息,且无人回复", "你连续发送了太多消息,且无人回复",
"想要休息一下",
] ]
default = True default = True

View File

@@ -0,0 +1,134 @@
import asyncio
import traceback
from src.common.logger_manager import get_logger
from src.chat.utils.timer_calculator import Timer
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
logger = get_logger("action_taken")
# 常量定义
WAITING_TIME_THRESHOLD = 1200 # 等待新消息时间阈值,单位秒
@register_action
class NoReplyAction(BaseAction):
"""不回复动作处理类
处理决定不回复的动作。
"""
action_name = "no_reply"
action_description = "不回复"
action_parameters = {}
action_require = [
"话题无关/无聊/不感兴趣/不懂",
"聊天记录中最新一条消息是你自己发的且无人回应你",
"你连续发送了太多消息,且无人回复",
]
default = True
def __init__(
self,
action_data: dict,
reasoning: str,
cycle_timers: dict,
thinking_id: str,
observations: List[Observation],
log_prefix: str,
shutting_down: bool = False,
**kwargs,
):
"""初始化不回复动作处理器
Args:
action_name: 动作名称
action_data: 动作数据
reasoning: 执行该动作的理由
cycle_timers: 计时器字典
thinking_id: 思考ID
observations: 观察列表
log_prefix: 日志前缀
shutting_down: 是否正在关闭
"""
super().__init__(action_data, reasoning, cycle_timers, thinking_id)
self.observations = observations
self.log_prefix = log_prefix
self._shutting_down = shutting_down
async def handle_action(self) -> Tuple[bool, str]:
"""
处理不回复的情况
工作流程:
1. 等待新消息、超时或关闭信号
2. 根据等待结果更新连续不回复计数
3. 如果达到阈值,触发回调
Returns:
Tuple[bool, str]: (是否执行成功, 空字符串)
"""
logger.info(f"{self.log_prefix} 决定不回复: {self.reasoning}")
observation = self.observations[0] if self.observations else None
try:
with Timer("等待新消息", self.cycle_timers):
# 等待新消息、超时或关闭信号,并获取结果
await self._wait_for_new_message(observation, self.thinking_id, self.log_prefix)
return True, "" # 不回复动作没有回复文本
except asyncio.CancelledError:
logger.info(f"{self.log_prefix} 处理 'no_reply' 时等待被中断 (CancelledError)")
raise
except Exception as e: # 捕获调用管理器或其他地方可能发生的错误
logger.error(f"{self.log_prefix} 处理 'no_reply' 时发生错误: {e}")
logger.error(traceback.format_exc())
return False, ""
async def _wait_for_new_message(self, observation: ChattingObservation, thinking_id: str, log_prefix: str) -> bool:
"""
等待新消息 或 检测到关闭信号
参数:
observation: 观察实例
thinking_id: 思考ID
log_prefix: 日志前缀
返回:
bool: 是否检测到新消息 (如果因关闭信号退出则返回 False)
"""
wait_start_time = asyncio.get_event_loop().time()
while True:
# --- 在每次循环开始时检查关闭标志 ---
if self._shutting_down:
logger.info(f"{log_prefix} 等待新消息时检测到关闭信号,中断等待。")
return False # 表示因为关闭而退出
# -----------------------------------
thinking_id_timestamp = parse_thinking_id_to_timestamp(thinking_id)
# 检查新消息
if await observation.has_new_messages_since(thinking_id_timestamp):
logger.info(f"{log_prefix} 检测到新消息")
return True
# 检查超时 (放在检查新消息和关闭之后)
if asyncio.get_event_loop().time() - wait_start_time > WAITING_TIME_THRESHOLD:
logger.warning(f"{log_prefix} 等待新消息超时({WAITING_TIME_THRESHOLD}秒)")
return False
try:
# 短暂休眠,让其他任务有机会运行,并能更快响应取消或关闭
await asyncio.sleep(0.5) # 缩短休眠时间
except asyncio.CancelledError:
# 如果在休眠时被取消,再次检查关闭标志
# 如果是正常关闭,则不需要警告
if not self._shutting_down:
logger.warning(f"{log_prefix} _wait_for_new_message 的休眠被意外取消")
# 无论如何,重新抛出异常,让上层处理
raise
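The waiting loop above is an instance of a generic poll-until pattern: check the condition, check the shutdown flag, check the timeout, then sleep briefly so cancellation stays responsive. A stripped-down, framework-free sketch of the same idea, with hypothetical names, for reference:

```python
import asyncio
from typing import Awaitable, Callable


async def poll_until(predicate: Callable[[], Awaitable[bool]], timeout: float, interval: float = 0.5) -> bool:
    """反复检查predicate,为True时返回True,超时返回False;取消异常原样抛出。"""
    start = asyncio.get_event_loop().time()
    while True:
        if await predicate():
            return True
        if asyncio.get_event_loop().time() - start > timeout:
            return False
        await asyncio.sleep(interval)  # 短暂休眠,保持对取消/关闭信号的响应
```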

View File

@@ -45,6 +45,8 @@ class PluginAction(BaseAction):
self._services["expressor"] = kwargs["expressor"] self._services["expressor"] = kwargs["expressor"]
if "chat_stream" in kwargs: if "chat_stream" in kwargs:
self._services["chat_stream"] = kwargs["chat_stream"] self._services["chat_stream"] = kwargs["chat_stream"]
if "replyer" in kwargs:
self._services["replyer"] = kwargs["replyer"]
self.log_prefix = kwargs.get("log_prefix", "") self.log_prefix = kwargs.get("log_prefix", "")
self._load_plugin_config() # 初始化时加载插件配置 self._load_plugin_config() # 初始化时加载插件配置

View File

@@ -4,11 +4,10 @@ from src.common.logger_manager import get_logger
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
-from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
+from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.focus_chat.hfc_utils import create_empty_anchor_message
-from src.config.config import global_config
logger = get_logger("action_taken")
@@ -21,21 +20,13 @@ class ReplyAction(BaseAction):
""" """
action_name: str = "reply" action_name: str = "reply"
action_description: str = "表达想法,可以只包含文本、表情或两者都有" action_description: str = "当你想要参与回复或者聊天"
action_parameters: dict[str:str] = { action_parameters: dict[str:str] = {
"text": "你想要表达的内容(可选)", "target": "如果你要明确回复特定某人的某句话请在target参数中中指定那句话的原始文本非必须仅文本不包含发送者)(可选)",
"emojis": "描述当前使用表情包的场景,一段话描述(可选)",
"target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)(可选)",
} }
action_require: list[str] = [ action_require: list[str] = [
"有实质性内容需要表达", "你想要闲聊或者随便附和",
"有人提到你,但你还没有回应他", "有人提到你",
"在合适的时候添加表情(不要总是添加),表情描述要详细,描述当前场景,一段话描述",
"如果你有明确的,要回复特定某人的某句话或者你想回复较早的消息请在target中指定那句话的原始文本",
"一次只回复一个人,一次只回复一个话题,突出重点",
"如果是自己发的消息想继续,需自然衔接",
"避免重复或评价自己的发言,不要和自己聊天",
f"注意你的回复要求:{global_config.expression.expression_style}",
] ]
associated_types: list[str] = ["text", "emoji"] associated_types: list[str] = ["text", "emoji"]
@@ -49,9 +40,9 @@ class ReplyAction(BaseAction):
cycle_timers: dict,
thinking_id: str,
observations: List[Observation],
-expressor: DefaultExpressor,
chat_stream: ChatStream,
log_prefix: str,
+replyer: DefaultReplyer,
**kwargs,
):
"""初始化回复动作处理器
@@ -63,13 +54,13 @@ class ReplyAction(BaseAction):
cycle_timers: 计时器字典
thinking_id: 思考ID
observations: 观察列表
-expressor: 表达器
+replyer: 回复器
chat_stream: 聊天流
log_prefix: 日志前缀
"""
super().__init__(action_data, reasoning, cycle_timers, thinking_id)
self.observations = observations
-self.expressor = expressor
+self.replyer = replyer
self.chat_stream = chat_stream
self.log_prefix = log_prefix
@@ -121,7 +112,7 @@ class ReplyAction(BaseAction):
else:
anchor_message.update_chat_stream(self.chat_stream)
-success, reply_set = await self.expressor.deal_reply(
+success, reply_set = await self.replyer.deal_reply(
cycle_timers=cycle_timers,
action_data=reply_data,
anchor_message=anchor_message,

View File

@@ -0,0 +1,141 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from src.common.logger_manager import get_logger
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.replyer.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.focus_chat.hfc_utils import create_empty_anchor_message
from src.config.config import global_config
logger = get_logger("action_taken")
@register_action
class ReplyAction(BaseAction):
"""回复动作处理类
处理构建和发送消息回复的动作。
"""
action_name: str = "reply"
action_description: str = "表达想法,可以只包含文本、表情或两者都有"
action_parameters: dict[str:str] = {
"text": "你想要表达的内容(可选)",
"emojis": "描述当前使用表情包的场景,一段话描述(可选)",
"target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)(可选)",
}
action_require: list[str] = [
"有实质性内容需要表达",
"有人提到你,但你还没有回应他",
"在合适的时候添加表情(不要总是添加),表情描述要详细,描述当前场景,一段话描述",
"如果你有明确的,要回复特定某人的某句话或者你想回复较早的消息请在target中指定那句话的原始文本",
"一次只回复一个人,一次只回复一个话题,突出重点",
"如果是自己发的消息想继续,需自然衔接",
"避免重复或评价自己的发言,不要和自己聊天",
f"注意你的回复要求:{global_config.expression.expression_style}",
]
associated_types: list[str] = ["text", "emoji"]
default = True
def __init__(
self,
action_data: dict,
reasoning: str,
cycle_timers: dict,
thinking_id: str,
observations: List[Observation],
expressor: DefaultExpressor,
chat_stream: ChatStream,
log_prefix: str,
**kwargs,
):
"""初始化回复动作处理器
Args:
action_name: 动作名称
action_data: 动作数据,包含 message, emojis, target 等
reasoning: 执行该动作的理由
cycle_timers: 计时器字典
thinking_id: 思考ID
observations: 观察列表
expressor: 表达器
chat_stream: 聊天流
log_prefix: 日志前缀
"""
super().__init__(action_data, reasoning, cycle_timers, thinking_id)
self.observations = observations
self.expressor = expressor
self.chat_stream = chat_stream
self.log_prefix = log_prefix
async def handle_action(self) -> Tuple[bool, str]:
"""
处理回复动作
Returns:
Tuple[bool, str]: (是否执行成功, 回复文本)
"""
# 注意: 此处可能会使用不同的expressor实现根据任务类型切换不同的回复策略
return await self._handle_reply(
reasoning=self.reasoning,
reply_data=self.action_data,
cycle_timers=self.cycle_timers,
thinking_id=self.thinking_id,
)
async def _handle_reply(
self, reasoning: str, reply_data: dict, cycle_timers: dict, thinking_id: str
) -> tuple[bool, str]:
"""
处理统一的回复动作 - 可包含文本和表情,顺序任意
reply_data格式:
{
"text": "你好啊" # 文本内容列表(可选)
"target": "锚定消息", # 锚定消息的文本内容
"emojis": "微笑" # 表情关键词列表(可选)
}
"""
logger.info(f"{self.log_prefix} 决定回复: {self.reasoning}")
# 从聊天观察获取锚定消息
chatting_observation: ChattingObservation = next(
obs for obs in self.observations if isinstance(obs, ChattingObservation)
)
if reply_data.get("target"):
anchor_message = chatting_observation.search_message_by_text(reply_data["target"])
else:
anchor_message = None
# 如果没有找到锚点消息,创建一个占位符
if not anchor_message:
logger.info(f"{self.log_prefix} 未找到锚点消息,创建占位符")
anchor_message = await create_empty_anchor_message(
self.chat_stream.platform, self.chat_stream.group_info, self.chat_stream
)
else:
anchor_message.update_chat_stream(self.chat_stream)
success, reply_set = await self.expressor.deal_reply(
cycle_timers=cycle_timers,
action_data=reply_data,
anchor_message=anchor_message,
reasoning=reasoning,
thinking_id=thinking_id,
)
reply_text = ""
for reply in reply_set:
type = reply[0]
data = reply[1]
if type == "text":
reply_text += data
elif type == "emoji":
reply_text += data
return success, reply_text

View File

@@ -0,0 +1,26 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Any
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.info.info_base import InfoBase
class BasePlanner(ABC):
"""规划器基类"""
def __init__(self, log_prefix: str, action_manager: ActionManager):
self.log_prefix = log_prefix
self.action_manager = action_manager
@abstractmethod
async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
规划下一步行动
Args:
all_plan_info: 所有计划信息
running_memorys: 回忆信息
Returns:
Dict[str, Any]: 规划结果
"""
pass
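A minimal sketch of a concrete subclass, to show what the abstract interface asks for. The `EchoPlanner` name and its fixed decision are invented; the return shape follows the planners added in this commit:

```python
from typing import List, Dict, Any

from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.planners.base_planner import BasePlanner


class EchoPlanner(BasePlanner):
    """示例规划器:不调用LLM,总是选择no_reply。"""

    async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
        return {
            "action_result": {
                "action_type": "no_reply",
                "action_data": {},
                "reasoning": "示例规划器,固定选择no_reply",
            },
            "current_mind": "",
            "observed_messages": [],
        }
```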

View File

@@ -16,6 +16,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.individuality.individuality import individuality
from src.chat.focus_chat.planners.action_manager import ActionManager
from json_repair import repair_json
+from src.chat.focus_chat.planners.base_planner import BasePlanner
logger = get_logger("planner")
@@ -73,9 +74,9 @@ action_name: {action_name}
)
-class ActionPlanner:
+class ActionPlanner(BasePlanner):
def __init__(self, log_prefix: str, action_manager: ActionManager):
-self.log_prefix = log_prefix
+super().__init__(log_prefix, action_manager)
# LLM规划器配置
self.planner_llm = LLMRequest(
model=global_config.model.focus_planner,
@@ -83,8 +84,6 @@ class ActionPlanner:
request_type="focus.planner", # 用于动作规划 request_type="focus.planner", # 用于动作规划
) )
self.action_manager = action_manager
async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]: async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
""" """
规划器 (Planner): 使用LLM根据上下文决定做出什么动作 规划器 (Planner): 使用LLM根据上下文决定做出什么动作
@@ -117,6 +116,7 @@ class ActionPlanner:
cycle_info = "" cycle_info = ""
structured_info = "" structured_info = ""
extra_info = [] extra_info = []
current_mind = ""
observed_messages = [] observed_messages = []
observed_messages_str = "" observed_messages_str = ""
chat_type = "group" chat_type = "group"

View File

@@ -0,0 +1,53 @@
from typing import Dict, Type
from src.chat.focus_chat.planners.base_planner import BasePlanner
from src.chat.focus_chat.planners.planner_complex import ActionPlanner as ComplexActionPlanner
from src.chat.focus_chat.planners.planner_simple import ActionPlanner as SimpleActionPlanner
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.config.config import global_config
from src.common.logger_manager import get_logger
logger = get_logger("planner_factory")
class PlannerFactory:
"""规划器工厂类,用于创建不同类型的规划器实例"""
# 注册所有可用的规划器类型
_planner_types: Dict[str, Type[BasePlanner]] = {
"complex": ComplexActionPlanner,
"simple": SimpleActionPlanner,
}
@classmethod
def register_planner(cls, name: str, planner_class: Type[BasePlanner]) -> None:
"""
注册新的规划器类型
Args:
name: 规划器类型名称
planner_class: 规划器类
"""
cls._planner_types[name] = planner_class
logger.info(f"注册新的规划器类型: {name}")
@classmethod
def create_planner(cls, log_prefix: str, action_manager: ActionManager) -> BasePlanner:
"""
创建规划器实例
Args:
log_prefix: 日志前缀
action_manager: 动作管理器实例
Returns:
BasePlanner: 规划器实例
"""
planner_type = global_config.focus_chat.planner_type
if planner_type not in cls._planner_types:
logger.warning(f"{log_prefix} 未知的规划器类型: {planner_type},使用默认规划器")
planner_type = "complex"
planner_class = cls._planner_types[planner_type]
logger.info(f"{log_prefix} 使用{planner_type}规划器")
return planner_class(log_prefix=log_prefix, action_manager=action_manager)
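A short usage sketch for the factory: the planner type is read from `global_config.focus_chat.planner_type`, unknown types fall back to `"complex"`, and extra implementations can be registered before `create_planner` is called. The `"simple_alias"` registration below is only for demonstration, and running this standalone assumes the global config has already been loaded:

```python
from src.chat.focus_chat.planners.planner_factory import PlannerFactory
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.planners.planner_simple import ActionPlanner as SimpleActionPlanner

# 也可以把已有实现注册到一个新名字下,之后将 planner_type 设为 "simple_alias" 即可选中它
PlannerFactory.register_planner("simple_alias", SimpleActionPlanner)

# HeartFChatting 中的用法与此一致:按配置创建规划器
action_manager = ActionManager()
planner = PlannerFactory.create_planner(log_prefix="[demo]", action_manager=action_manager)
```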

View File

@@ -0,0 +1,384 @@
import json # <--- 确保导入 json
import traceback
from typing import List, Dict, Any, Optional
from rich.traceback import install
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.focus_chat.info.cycle_info import CycleInfo
from src.chat.focus_chat.info.mind_info import MindInfo
from src.chat.focus_chat.info.action_info import ActionInfo
from src.chat.focus_chat.info.structured_info import StructuredInfo
from src.chat.focus_chat.info.self_info import SelfInfo
from src.common.logger_manager import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.individuality.individuality import individuality
from src.chat.focus_chat.planners.action_manager import ActionManager
from json_repair import repair_json
from src.chat.focus_chat.planners.base_planner import BasePlanner
from datetime import datetime
logger = get_logger("planner")
install(extra_lines=3)
def init_prompt():
Prompt(
"""
你的自我认知是:
{self_info_block}
{extra_info_block}
{memory_str}
{time_block}
你是群内的一员,你现在正在参与群内的闲聊,以下是群内的聊天内容:
{chat_content_block}
{mind_info_block}
{cycle_info_block}
注意除了下面动作选项之外你在群聊里不能做其他任何事情这是你能力的边界现在请你选择合适的action:
{moderation_prompt}
{action_options_text}
以严格的 JSON 格式输出,且仅包含 JSON 内容,不要有任何其他文字或解释。
请你以下面格式输出:
{{
"action": "action_name"
"参数": "参数的值"(可能有多个参数),
}}
请输出你提取的JSON不要有任何其他文字或解释
""",
"simple_planner_prompt",
)
Prompt(
"""
动作名称:{action_name}
描述:{action_description}
{action_parameters}
使用该动作的场景:
{action_require}""",
"action_prompt",
)
class ActionPlanner(BasePlanner):
def __init__(self, log_prefix: str, action_manager: ActionManager):
super().__init__(log_prefix, action_manager)
# LLM规划器配置
self.planner_llm = LLMRequest(
model=global_config.model.focus_planner,
max_tokens=1000,
request_type="focus.planner", # 用于动作规划
)
self.utils_llm = LLMRequest(
model=global_config.model.utils_small,
max_tokens=1000,
request_type="focus.planner", # 用于动作规划
)
async def plan(self, all_plan_info: List[InfoBase], running_memorys: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
规划器 (Planner): 使用LLM根据上下文决定做出什么动作。
参数:
all_plan_info: 所有计划信息
running_memorys: 回忆信息
"""
action = "no_reply" # 默认动作
reasoning = "规划器初始化默认"
action_data = {}
try:
# 获取观察信息
extra_info: list[str] = []
# 设置默认值
nickname_str = ""
for nicknames in global_config.bot.alias_names:
nickname_str += f"{nicknames},"
name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
personality_block = individuality.get_personality_prompt(x_person=2, level=2)
identity_block = individuality.get_identity_prompt(x_person=2, level=2)
self_info = name_block + personality_block + identity_block
current_mind = "你思考了很久,没有想清晰要做什么"
cycle_info = ""
structured_info = ""
extra_info = []
observed_messages = []
observed_messages_str = ""
chat_type = "group"
is_group_chat = True
for info in all_plan_info:
if isinstance(info, ObsInfo):
observed_messages = info.get_talking_message()
observed_messages_str = info.get_talking_message_str_truncate()
chat_type = info.get_chat_type()
is_group_chat = chat_type == "group"
elif isinstance(info, MindInfo):
current_mind = info.get_current_mind()
elif isinstance(info, CycleInfo):
cycle_info = info.get_observe_info()
elif isinstance(info, SelfInfo):
self_info = info.get_processed_info()
elif isinstance(info, StructuredInfo):
structured_info = info.get_processed_info()
# print(f"structured_info: {structured_info}")
# elif not isinstance(info, ActionInfo): # 跳过已处理的ActionInfo
# extra_info.append(info.get_processed_info())
# 获取当前可用的动作
current_available_actions = self.action_manager.get_using_actions()
# 如果没有可用动作或只有no_reply动作直接返回no_reply
if not current_available_actions or (
len(current_available_actions) == 1 and "no_reply" in current_available_actions
):
action = "no_reply"
reasoning = "没有可用的动作" if not current_available_actions else "只有no_reply动作可用跳过规划"
logger.info(f"{self.log_prefix}{reasoning}")
self.action_manager.restore_actions()
logger.debug(
f"{self.log_prefix}沉默后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
)
return {
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
"current_mind": current_mind,
"observed_messages": observed_messages,
}
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
prompt = await self.build_planner_prompt(
self_info_block=self_info,
is_group_chat=is_group_chat, # <-- Pass HFC state
chat_target_info=None,
observed_messages_str=observed_messages_str, # <-- Pass local variable
current_mind=current_mind, # <-- Pass argument
structured_info=structured_info, # <-- Pass SubMind info
current_available_actions=current_available_actions, # <-- Pass determined actions
cycle_info=cycle_info, # <-- Pass cycle info
extra_info=extra_info,
running_memorys=running_memorys,
)
# --- 调用 LLM (普通文本生成) ---
llm_content = None
try:
prompt = f"{prompt}"
llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt)
logger.debug(
f"{self.log_prefix}规划器Prompt:\n{prompt}\n\n决策动作:{action},\n动作信息: '{action_data}'\n理由: {reasoning}"
)
logger.debug(f"{self.log_prefix}LLM 原始响应: {llm_content}")
logger.debug(f"{self.log_prefix}LLM 原始理由响应: {reasoning_content}")
except Exception as req_e:
logger.error(f"{self.log_prefix}LLM 请求执行失败: {req_e}")
reasoning = f"LLM 请求失败,你的模型出现问题: {req_e}"
action = "no_reply"
if llm_content:
try:
fixed_json_string = repair_json(llm_content)
if isinstance(fixed_json_string, str):
try:
parsed_json = json.loads(fixed_json_string)
except json.JSONDecodeError as decode_error:
logger.error(f"JSON解析错误: {str(decode_error)}")
parsed_json = {}
else:
# 如果repair_json直接返回了字典对象直接使用
parsed_json = fixed_json_string
# 提取决策,提供默认值
extracted_action = parsed_json.get("action", "no_reply")
# extracted_reasoning = parsed_json.get("reasoning", "LLM未提供理由")
extracted_reasoning = ""
# 将所有其他属性添加到action_data
action_data = {}
for key, value in parsed_json.items():
if key not in ["action", "reasoning"]:
action_data[key] = value
action_data["identity"] = self_info
# 对于reply动作不需要额外处理因为相关字段已经在上面的循环中添加到action_data
if extracted_action not in current_available_actions:
logger.warning(
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{extracted_action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
)
action = "no_reply"
reasoning = f"LLM 返回了当前不可用的动作 '{extracted_action}' (可用: {list(current_available_actions.keys())})。原始理由: {extracted_reasoning}"
else:
# 动作有效且可用
action = extracted_action
reasoning = extracted_reasoning
except Exception as json_e:
logger.warning(
f"{self.log_prefix}解析LLM响应JSON失败 {json_e}. LLM原始输出: '{llm_content}'"
)
traceback.print_exc()
reasoning = f"解析LLM响应JSON失败: {json_e}. 将使用默认动作 'no_reply'."
action = "no_reply"
except Exception as outer_e:
logger.error(f"{self.log_prefix}Planner 处理过程中发生意外错误,规划失败,将执行 no_reply: {outer_e}")
traceback.print_exc()
action = "no_reply"
reasoning = f"Planner 内部处理错误: {outer_e}"
# logger.debug(
# f"{self.log_prefix}规划器Prompt:\n{prompt}\n\n决策动作:{action},\n动作信息: '{action_data}'\n理由: {reasoning}"
# )
# 恢复到默认动作集
self.action_manager.restore_actions()
logger.debug(
f"{self.log_prefix}规划后恢复到默认动作集, 当前可用: {list(self.action_manager.get_using_actions().keys())}"
)
action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning}
plan_result = {
"action_result": action_result,
"current_mind": current_mind,
"observed_messages": observed_messages,
"action_prompt": prompt,
}
return plan_result
async def build_planner_prompt(
self,
self_info_block: str,
is_group_chat: bool, # Now passed as argument
chat_target_info: Optional[dict], # Now passed as argument
observed_messages_str: str,
current_mind: Optional[str],
structured_info: Optional[str],
current_available_actions: Dict[str, ActionInfo],
cycle_info: Optional[str],
extra_info: list[str],
running_memorys: List[Dict[str, Any]],
) -> str:
"""构建 Planner LLM 的提示词 (获取模板并填充数据)"""
try:
memory_str = ""
if global_config.focus_chat.parallel_processing:
memory_str = ""
if running_memorys:
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
for running_memory in running_memorys:
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
chat_context_description = "你现在正在一个群聊中"
chat_target_name = None # Only relevant for private
if not is_group_chat and chat_target_info:
chat_target_name = (
chat_target_info.get("person_name") or chat_target_info.get("user_nickname") or "对方"
)
chat_context_description = f"你正在和 {chat_target_name} 私聊"
chat_content_block = ""
if observed_messages_str:
chat_content_block = f"聊天记录:\n{observed_messages_str}"
else:
chat_content_block = "你还未开始聊天"
mind_info_block = ""
if current_mind:
mind_info_block = f"对聊天的规划:{current_mind}"
else:
mind_info_block = "你刚参与聊天"
personality_block = individuality.get_prompt(x_person=2, level=2)
action_options_block = ""
for using_actions_name, using_actions_info in current_available_actions.items():
# print(using_actions_name)
# print(using_actions_info)
# print(using_actions_info["parameters"])
# print(using_actions_info["require"])
# print(using_actions_info["description"])
using_action_prompt = await global_prompt_manager.get_prompt_async("action_prompt")
param_text = ""
for param_name, param_description in using_actions_info["parameters"].items():
param_text += f" {param_name}: {param_description}\n"
require_text = ""
for require_item in using_actions_info["require"]:
require_text += f" - {require_item}\n"
if param_text:
param_text = f"参数:\n{param_text}"
else:
param_text = "无需参数"
using_action_prompt = using_action_prompt.format(
action_name=using_actions_name,
action_description=using_actions_info["description"],
action_parameters=param_text,
action_require=require_text,
)
action_options_block += using_action_prompt
extra_info_block = "\n".join(extra_info)
extra_info_block += f"\n{structured_info}"
if extra_info or structured_info:
extra_info_block = f"以下是一些额外的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是一些额外的信息,现在请你阅读以下内容,进行决策"
else:
extra_info_block = ""
# moderation_prompt_block = "请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。"
moderation_prompt_block = ""
# 获取当前时间
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
planner_prompt_template = await global_prompt_manager.get_prompt_async("simple_planner_prompt")
prompt = planner_prompt_template.format(
self_info_block=self_info_block,
memory_str=memory_str,
time_block=time_block,
# bot_name=global_config.bot.nickname,
prompt_personality=personality_block,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
mind_info_block=mind_info_block,
cycle_info_block=cycle_info,
action_options_text=action_options_block,
# action_available_block=action_available_block,
extra_info_block=extra_info_block,
moderation_prompt=moderation_prompt_block,
)
return prompt
except Exception as e:
logger.error(f"构建 Planner 提示词时出错: {e}")
logger.error(traceback.format_exc())
return "构建 Planner Prompt 时出错"
init_prompt()
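To make the JSON-repair path in `plan` concrete, here is a standalone sketch of how a slightly malformed planner reply is normalized into `action` plus `action_data`. The sample LLM output string is invented for illustration:

```python
import json
from json_repair import repair_json

# 一个缺少逗号的规划器输出示例(模拟LLM常见的格式问题)
llm_content = '{"action": "reply" "target": "今天吃什么"}'

fixed = repair_json(llm_content)  # 通常返回修复后的JSON字符串
parsed = json.loads(fixed) if isinstance(fixed, str) else fixed

action = parsed.get("action", "no_reply")
action_data = {k: v for k, v in parsed.items() if k not in ("action", "reasoning")}
print(action, action_data)  # 预期: reply {'target': '今天吃什么'}
```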

View File

@@ -0,0 +1,650 @@
import traceback
from typing import List, Optional, Dict, Any, Tuple
from src.chat.message_receive.message import MessageRecv, MessageThinking, MessageSending
from src.chat.message_receive.message import Seg # Local import needed after move
from src.chat.message_receive.message import UserInfo
from src.chat.message_receive.chat_stream import chat_manager
from src.common.logger_manager import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.utils.utils_image import image_path_to_base64 # Local import needed after move
from src.chat.utils.timer_calculator import Timer # <--- Import Timer
from src.chat.emoji_system.emoji_manager import emoji_manager
from src.chat.focus_chat.heartFC_sender import HeartFCSender
from src.chat.utils.utils import process_llm_response
from src.chat.utils.info_catcher import info_catcher_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
import time
from src.chat.focus_chat.expressors.exprssion_learner import expression_learner
import random
from datetime import datetime
import re
logger = get_logger("expressor")
def init_prompt():
Prompt(
"""
你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
{style_habbits}
{time_block}
你现在正在群里聊天,以下是群里正在进行的聊天内容:
{chat_info}
以上是聊天内容,你需要了解聊天记录中的内容
{chat_target}
{identity},在这聊天中,"{target_message}"引起了你的注意,你想要在群里发言或者回复这条消息。
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。注意不要复读你说过的话。
请你根据情景使用以下句法:
{grammar_habbits}
{config_expression_style},请注意不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出回复内容。
{keywords_reaction_prompt}
请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。
不要浮夸,不要夸张修辞,只输出一条回复就好。
现在,你说:
""",
"default_replyer_prompt",
)
Prompt(
"""
你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
{style_habbits}
{time_block}
你现在正在聊天,以下是你和对方正在进行的聊天内容:
{chat_info}
以上是聊天内容,你需要了解聊天记录中的内容
{chat_target}
{identity},在这聊天中,"{target_message}"引起了你的注意,你想要发言或者回复这条消息。
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。注意不要复读你说过的话。
请你根据情景使用以下句法:
{grammar_habbits}
{config_expression_style},请注意不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出回复内容。
{keywords_reaction_prompt}
请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。
不要浮夸,不要夸张修辞,只输出一条回复就好。
现在,你说:
""",
"default_replyer_private_prompt",
)
class DefaultReplyer:
def __init__(self, chat_id: str):
self.log_prefix = "replyer"
# TODO: API-Adapter修改标记
self.express_model = LLMRequest(
model=global_config.model.focus_expressor,
# temperature=global_config.model.focus_expressor["temp"],
max_tokens=256,
request_type="focus.expressor",
)
self.heart_fc_sender = HeartFCSender()
self.chat_id = chat_id
self.chat_stream: Optional[ChatStream] = None
self.is_group_chat = True
self.chat_target_info = None
async def initialize(self):
self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
async def _create_thinking_message(self, anchor_message: Optional[MessageRecv], thinking_id: str):
"""创建思考消息 (尝试锚定到 anchor_message)"""
if not anchor_message or not anchor_message.chat_stream:
logger.error(f"{self.log_prefix} 无法创建思考消息,缺少有效的锚点消息或聊天流。")
return None
chat = anchor_message.chat_stream
messageinfo = anchor_message.message_info
thinking_time_point = parse_thinking_id_to_timestamp(thinking_id)
bot_user_info = UserInfo(
user_id=global_config.bot.qq_account,
user_nickname=global_config.bot.nickname,
platform=messageinfo.platform,
)
thinking_message = MessageThinking(
message_id=thinking_id,
chat_stream=chat,
bot_user_info=bot_user_info,
reply=anchor_message, # 回复的是锚点消息
thinking_start_time=thinking_time_point,
)
# logger.debug(f"创建思考消息thinking_message{thinking_message}")
await self.heart_fc_sender.register_thinking(thinking_message)
async def deal_reply(
self,
cycle_timers: dict,
action_data: Dict[str, Any],
reasoning: str,
anchor_message: MessageRecv,
thinking_id: str,
) -> tuple[bool, Optional[List[Tuple[str, str]]]]:
# 创建思考消息
await self._create_thinking_message(anchor_message, thinking_id)
reply = [] # 初始化 reply防止未定义
try:
has_sent_something = False
# 处理文本部分
# text_part = action_data.get("text", [])
# if text_part:
with Timer("生成回复", cycle_timers):
# 可以保留原有的文本处理逻辑或进行适当调整
reply = await self.reply(
# in_mind_reply=text_part,
anchor_message=anchor_message,
thinking_id=thinking_id,
reason=reasoning,
action_data=action_data,
)
# with Timer("选择表情", cycle_timers):
# emoji_keyword = action_data.get("emojis", [])
# emoji_base64 = await self._choose_emoji(emoji_keyword)
# if emoji_base64:
# reply.append(("emoji", emoji_base64))
if reply:
with Timer("发送消息", cycle_timers):
sent_msg_list = await self.send_response_messages(
anchor_message=anchor_message,
thinking_id=thinking_id,
response_set=reply,
)
has_sent_something = True
else:
logger.warning(f"{self.log_prefix} 文本回复生成失败")
if not has_sent_something:
logger.warning(f"{self.log_prefix} 回复动作未包含任何有效内容")
return has_sent_something, sent_msg_list
except Exception as e:
logger.error(f"回复失败: {e}")
traceback.print_exc()
return False, None
# --- 回复器 (Replier) 的定义 --- #
async def deal_emoji(
self,
anchor_message: MessageRecv,
thinking_id: str,
action_data: Dict[str, Any],
cycle_timers: dict,
) -> Optional[List[str]]:
"""
表情动作处理类
"""
await self._create_thinking_message(anchor_message, thinking_id)
try:
has_sent_something = False
sent_msg_list = []
reply = []
with Timer("选择表情", cycle_timers):
emoji_keyword = action_data.get("description", [])
emoji_base64, description = await self._choose_emoji(emoji_keyword)
if emoji_base64:
logger.info(f"选择表情: {description}")
reply.append(("emoji", emoji_base64))
else:
logger.warning(f"{self.log_prefix} 没有找到合适表情")
if reply:
with Timer("发送表情", cycle_timers):
sent_msg_list = await self.send_response_messages(
anchor_message=anchor_message,
thinking_id=thinking_id,
response_set=reply,
)
has_sent_something = True
else:
logger.warning(f"{self.log_prefix} 表情发送失败")
if not has_sent_something:
logger.warning(f"{self.log_prefix} 表情发送失败")
return has_sent_something, sent_msg_list
except Exception as e:
logger.error(f"回复失败: {e}")
traceback.print_exc()
return False, None
async def reply(
self,
# in_mind_reply: str,
reason: str,
anchor_message: MessageRecv,
thinking_id: str,
action_data: Dict[str, Any],
) -> Optional[List[str]]:
"""
回复器 (Replier): 核心逻辑,负责生成回复文本。
(已整合原 HeartFCGenerator 的功能)
"""
try:
# 1. 获取情绪影响因子并调整模型温度
# arousal_multiplier = mood_manager.get_arousal_multiplier()
# current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
# self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
# --- Determine sender_name for private chat ---
sender_name_for_prompt = "某人" # Default for group or if info unavailable
if not self.is_group_chat and self.chat_target_info:
# Prioritize person_name, then nickname
sender_name_for_prompt = (
self.chat_target_info.get("person_name")
or self.chat_target_info.get("user_nickname")
or sender_name_for_prompt
)
# --- End determining sender_name ---
target_message = action_data.get("target", "")
identity = action_data.get("identity", "")
# 3. 构建 Prompt
with Timer("构建Prompt", {}): # 内部计时器,可选保留
prompt = await self.build_prompt_focus(
chat_stream=self.chat_stream, # Pass the stream object
# in_mind_reply=in_mind_reply,
identity=identity,
reason=reason,
sender_name=sender_name_for_prompt, # Pass determined name
target_message=target_message,
config_expression_style=global_config.expression.expression_style,
)
# 4. 调用 LLM 生成回复
content = None
reasoning_content = None
model_name = "unknown_model"
if not prompt:
logger.error(f"{self.log_prefix}[Replier-{thinking_id}] Prompt 构建失败,无法生成回复。")
return None
try:
with Timer("LLM生成", {}): # 内部计时器,可选保留
# TODO: API-Adapter修改标记
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
logger.debug(f"prompt: {prompt}")
logger.info(f"最终回复: {content}")
info_catcher.catch_after_llm_generated(
prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=model_name
)
except Exception as llm_e:
# 精简报错信息
logger.error(f"{self.log_prefix}LLM 生成失败: {llm_e}")
return None # LLM 调用失败则无法生成回复
processed_response = process_llm_response(content)
# 5. 处理 LLM 响应
if not content:
logger.warning(f"{self.log_prefix}LLM 生成了空内容。")
return None
if not processed_response:
logger.warning(f"{self.log_prefix}处理后的回复为空。")
return None
reply_set = []
for str in processed_response:
reply_seg = ("text", str)
reply_set.append(reply_seg)
return reply_set
except Exception as e:
logger.error(f"{self.log_prefix}回复生成意外失败: {e}")
traceback.print_exc()
return None
async def build_prompt_focus(
self,
reason,
chat_stream,
sender_name,
# in_mind_reply,
identity,
target_message,
config_expression_style,
) -> str:
is_group_chat = bool(chat_stream.group_info)
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size,
)
chat_talking_prompt = await build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=True,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
)
(
learnt_style_expressions,
learnt_grammar_expressions,
personality_expressions,
) = await expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
style_habbits = []
grammar_habbits = []
# 1. learnt_expressions加权随机选3条
if learnt_style_expressions:
weights = [expr["count"] for expr in learnt_style_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
# 2. learnt_grammar_expressions加权随机选3条
if learnt_grammar_expressions:
weights = [expr["count"] for expr in learnt_grammar_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
grammar_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
# 3. personality_expressions随机选1条
if personality_expressions:
expr = random.choice(personality_expressions)
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
style_habbits_str = "\n".join(style_habbits)
grammar_habbits_str = "\n".join(grammar_habbits)
# 关键词检测与反应
keywords_reaction_prompt = ""
try:
# 处理关键词规则
for rule in global_config.keyword_reaction.keyword_rules:
if any(keyword in target_message for keyword in rule.keywords):
logger.info(f"检测到关键词规则:{rule.keywords},触发反应:{rule.reaction}")
keywords_reaction_prompt += f"{rule.reaction}"
# 处理正则表达式规则
for rule in global_config.keyword_reaction.regex_rules:
for pattern_str in rule.regex:
try:
pattern = re.compile(pattern_str)
if result := pattern.search(target_message):
reaction = rule.reaction
for name, content in result.groupdict().items():
reaction = reaction.replace(f"[{name}]", content)
logger.info(f"匹配到正则表达式:{pattern_str},触发反应:{reaction}")
keywords_reaction_prompt += reaction + ""
break
except re.error as e:
logger.error(f"正则表达式编译错误: {pattern_str}, 错误信息: {str(e)}")
continue
except Exception as e:
logger.error(f"关键词检测与反应时发生异常: {str(e)}", exc_info=True)
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
# logger.debug("开始构建 focus prompt")
# --- Choose template based on chat type ---
if is_group_chat:
template_name = "default_replyer_prompt"
# Group specific formatting variables (already fetched or default)
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
prompt = await global_prompt_manager.format_prompt(
template_name,
style_habbits=style_habbits_str,
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
time_block=time_block,
# bot_name=global_config.bot.nickname,
# prompt_personality="",
# reason=reason,
# in_mind_reply=in_mind_reply,
keywords_reaction_prompt=keywords_reaction_prompt,
identity=identity,
target_message=target_message,
config_expression_style=config_expression_style,
)
else: # Private chat
template_name = "default_replyer_private_prompt"
chat_target_1 = "你正在和人私聊"
prompt = await global_prompt_manager.format_prompt(
template_name,
style_habbits=style_habbits_str,
grammar_habbits=grammar_habbits_str,
chat_target=chat_target_1,
chat_info=chat_talking_prompt,
time_block=time_block,
# bot_name=global_config.bot.nickname,
# prompt_personality="",
# reason=reason,
# in_mind_reply=in_mind_reply,
keywords_reaction_prompt=keywords_reaction_prompt,
identity=identity,
target_message=target_message,
config_expression_style=config_expression_style,
)
return prompt
# --- 发送器 (Sender) --- #
async def send_response_messages(
self,
anchor_message: Optional[MessageRecv],
response_set: List[Tuple[str, str]],
thinking_id: str = "",
display_message: str = "",
) -> Optional[MessageSending]:
"""发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
chat = self.chat_stream
chat_id = self.chat_id
if chat is None:
logger.error(f"{self.log_prefix} 无法发送回复chat_stream 为空。")
return None
if not anchor_message:
logger.error(f"{self.log_prefix} 无法发送回复anchor_message 为空。")
return None
stream_name = chat_manager.get_stream_name(chat_id) or chat_id # 获取流名称用于日志
# 检查思考过程是否仍在进行,并获取开始时间
if thinking_id:
# print(f"thinking_id: {thinking_id}")
thinking_start_time = await self.heart_fc_sender.get_thinking_start_time(chat_id, thinking_id)
else:
print("thinking_id is None")
# thinking_id = "ds" + str(round(time.time(), 2))
thinking_start_time = time.time()
if thinking_start_time is None:
logger.error(f"[{stream_name}]replyer思考过程未找到或已结束无法发送回复。")
return None
mark_head = False
# first_bot_msg: Optional[MessageSending] = None
reply_message_ids = [] # 记录实际发送的消息ID
sent_msg_list = []
for i, msg_text in enumerate(response_set):
# 为每个消息片段生成唯一ID
type = msg_text[0]
data = msg_text[1]
if global_config.experimental.debug_show_chat_mode and type == "text":
data += ""
part_message_id = f"{thinking_id}_{i}"
message_segment = Seg(type=type, data=data)
if type == "emoji":
is_emoji = True
else:
is_emoji = False
reply_to = not mark_head
bot_message = await self._build_single_sending_message(
anchor_message=anchor_message,
message_id=part_message_id,
message_segment=message_segment,
display_message=display_message,
reply_to=reply_to,
is_emoji=is_emoji,
thinking_id=thinking_id,
thinking_start_time=thinking_start_time,
)
try:
if not mark_head:
mark_head = True
# first_bot_msg = bot_message # 保存第一个成功发送的消息对象
typing = False
else:
typing = True
if type == "emoji":
typing = False
if anchor_message.raw_message:
set_reply = True
else:
set_reply = False
sent_msg = await self.heart_fc_sender.send_message(
bot_message, has_thinking=True, typing=typing, set_reply=set_reply
)
reply_message_ids.append(part_message_id) # 记录我们生成的ID
sent_msg_list.append((type, sent_msg))
except Exception as e:
logger.error(f"{self.log_prefix}发送回复片段 {i} ({part_message_id}) 时失败: {e}")
traceback.print_exc()
# 这里可以选择是继续发送下一个片段还是中止
# 在尝试发送完所有片段后,完成原始的 thinking_id 状态
try:
await self.heart_fc_sender.complete_thinking(chat_id, thinking_id)
except Exception as e:
logger.error(f"{self.log_prefix}完成思考状态 {thinking_id} 时出错: {e}")
return sent_msg_list
async def _choose_emoji(self, send_emoji: str):
"""
选择表情根据send_emoji文本选择表情返回表情base64
"""
emoji_base64 = ""
description = ""
emoji_raw = await emoji_manager.get_emoji_for_text(send_emoji)
if emoji_raw:
emoji_path, description = emoji_raw
emoji_base64 = image_path_to_base64(emoji_path)
return emoji_base64, description
async def _build_single_sending_message(
self,
anchor_message: MessageRecv,
message_id: str,
message_segment: Seg,
reply_to: bool,
is_emoji: bool,
thinking_id: str,
thinking_start_time: float,
display_message: str,
) -> MessageSending:
"""构建单个发送消息"""
bot_user_info = UserInfo(
user_id=global_config.bot.qq_account,
user_nickname=global_config.bot.nickname,
platform=self.chat_stream.platform,
)
bot_message = MessageSending(
message_id=message_id, # 使用片段的唯一ID
chat_stream=self.chat_stream,
bot_user_info=bot_user_info,
sender_info=anchor_message.message_info.user_info,
message_segment=message_segment,
reply=anchor_message, # 回复原始锚点
is_head=reply_to,
is_emoji=is_emoji,
thinking_start_time=thinking_start_time, # 传递原始思考开始时间
display_message=display_message,
)
return bot_message
def weighted_sample_no_replacement(items, weights, k) -> list:
"""
加权且不放回地随机抽取k个元素。
参数:
items: 待抽取的元素列表
weights: 每个元素对应的权重与items等长且为正数
k: 需要抽取的元素个数
返回:
selected: 按权重加权且不重复抽取的k个元素组成的列表
如果items中的元素不足k就只会返回所有可用的元素
实现思路:
每次从当前池中按权重加权随机选出一个元素选中后将其从池中移除重复k次。
这样保证了:
1. count越大被选中概率越高
2. 不会重复选中同一个元素
"""
selected = []
pool = list(zip(items, weights))
for _ in range(min(k, len(pool))):
total = sum(w for _, w in pool)
r = random.uniform(0, total)
upto = 0
for idx, (item, weight) in enumerate(pool):
upto += weight
if upto >= r:
selected.append(item)
pool.pop(idx)
break
return selected
init_prompt()
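A quick usage sketch for `weighted_sample_no_replacement`, with made-up expression records shaped like the `learnt_style_expressions` this file reads (`situation`/`style`/`count`):

```python
# 示例数据,仅用于演示加权不放回抽样;字段名与本文件读取的表达记录一致
expressions = [
    {"situation": "朋友开玩笑", "style": "用夸张的语气回一句", "count": 12},
    {"situation": "被夸奖", "style": "害羞地否认", "count": 3},
    {"situation": "话题冷场", "style": "抛出一个问题", "count": 7},
]
weights = [expr["count"] for expr in expressions]

# count越大的表达越可能被选中,且同一条不会被重复抽到
selected = weighted_sample_no_replacement(expressions, weights, 2)
for expr in selected:
    print(f"{expr['situation']}时,使用 {expr['style']}")
```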

View File

@@ -224,39 +224,27 @@ class MemoryManager:
Returns:
包含总结、概括、关键概念和事件的字典
"""
prompt = f"""请对以下内容进行总结,总结成记忆,输出部分:
1. 记忆内容主题精简20字以内让用户可以一眼看出记忆内容是什么
-2. 记忆内容概括200字以内让用户可以了解记忆内容的大致内容
+2. key_points多条包含关键的概念、事件每条都要包含解释或描述谁在什么时候干了什么
-3. 关键概念和知识keypoints多条提取关键的概念、知识点和关键词要包含对概念的解释
-4. 事件描述events多条描述谁人物在什么时候时间做了什么事件
内容:
{content}
请按以下JSON格式输出
-```json
{{
"brief": "记忆内容主题20字以内",
-"detailed": "记忆内容概括200字以内",
-"keypoints": [
-"概念1解释",
-"概念2解释",
-...
-],
-"events": [
-"事件1谁在什么时候做了什么",
-"事件2谁在什么时候做了什么",
+"key_points": [
+"要点1解释或描述",
+"要点2解释或描述",
...
]
}}
-```
请确保输出是有效的JSON格式不要添加任何额外的说明或解释。
"""
default_summary = {
"brief": "主题未知的记忆",
-"detailed": "大致内容未知的记忆",
-"keypoints": ["未知的概念"],
-"events": ["未知的事件"],
+"key_points": ["未知的要点"],
}
try:
@@ -288,29 +276,14 @@ class MemoryManager:
if "brief" not in json_result or not isinstance(json_result["brief"], str): if "brief" not in json_result or not isinstance(json_result["brief"], str):
json_result["brief"] = "主题未知的记忆" json_result["brief"] = "主题未知的记忆"
if "detailed" not in json_result or not isinstance(json_result["detailed"], str): # 处理关键要点
json_result["detailed"] = "大致内容未知的记忆" if "key_points" not in json_result or not isinstance(json_result["key_points"], list):
json_result["key_points"] = ["未知的要点"]
# 处理关键概念
if "keypoints" not in json_result or not isinstance(json_result["keypoints"], list):
json_result["keypoints"] = ["未知的概念"]
else: else:
# 确保keypoints中的每个项目都是字符串 # 确保key_points中的每个项目都是字符串
json_result["keypoints"] = [str(point) for point in json_result["keypoints"] if point is not None] json_result["key_points"] = [str(point) for point in json_result["key_points"] if point is not None]
if not json_result["keypoints"]: if not json_result["key_points"]:
json_result["keypoints"] = ["未知的概念"] json_result["key_points"] = ["未知的要点"]
# 处理事件
if "events" not in json_result or not isinstance(json_result["events"], list):
json_result["events"] = ["未知的事件"]
else:
# 确保events中的每个项目都是字符串
json_result["events"] = [str(event) for event in json_result["events"] if event is not None]
if not json_result["events"]:
json_result["events"] = ["未知的事件"]
# 兼容旧版将keypoints和events合并到key_points中
json_result["key_points"] = json_result["keypoints"] + json_result["events"]
return json_result return json_result
@@ -348,52 +321,31 @@ class MemoryManager:
# 使用LLM根据要求对总结、概括和要点进行精简修改
prompt = f"""
请根据以下要求,对记忆内容的主题、概括、关键概念和事件进行精简,模拟记忆的遗忘过程:
请根据以下要求,对记忆内容的主题和关键要点进行精简,模拟记忆的遗忘过程:
要求:{requirements}
你可以随机对关键概念和事件进行压缩,模糊或者丢弃,修改后,同样修改主题和概括
你可以随机对关键要点进行压缩,模糊或者丢弃,修改后,同样修改主题
目前主题:{summary["brief"]}
目前概括:{summary["detailed"]}
目前关键要点:
{chr(10).join([f"- {point}" for point in summary.get("key_points", [])])}
目前关键概念:
请生成修改后的主题和关键要点,遵循以下格式:
{chr(10).join([f"- {point}" for point in summary.get("keypoints", [])])}
目前事件:
{chr(10).join([f"- {point}" for point in summary.get("events", [])])}
请生成修改后的主题、概括、关键概念和事件,遵循以下格式:
```json
{{
"brief": "修改后的主题20字以内",
"detailed": "修改后的概括200字以内",
"key_points": [
"keypoints": [
"修改后的要点1解释或描述",
"修改后的概念1解释",
"修改后的要点2解释或描述"
"修改后的概念2解释"
],
"events": [
"修改后的事件1谁在什么时候做了什么",
"修改后的事件2谁在什么时候做了什么"
]
}}
```
请确保输出是有效的JSON格式不要添加任何额外的说明或解释。
"""
# 检查summary中是否有旧版结构转换为新版结构
if "keypoints" not in summary and "events" not in summary and "key_points" in summary:
# 尝试区分key_points中的keypoints和events
# 简单地将前半部分视为keypoints后半部分视为events
key_points = summary.get("key_points", [])
halfway = len(key_points) // 2
summary["keypoints"] = key_points[:halfway] or ["未知的概念"]
summary["events"] = key_points[halfway:] or ["未知的事件"]
# 定义默认的精简结果
default_refined = {
"brief": summary["brief"],
"detailed": summary["detailed"],
"key_points": summary.get("key_points", ["未知的要点"])[:1], # 默认只保留第一个要点
"keypoints": summary.get("keypoints", ["未知的概念"])[:1], # 默认只保留第一个关键概念
"events": summary.get("events", ["未知的事件"])[:1], # 默认只保留第一个事件
}
try:
@@ -421,30 +373,17 @@ class MemoryManager:
logger.error(f"修复后的JSON不是字典类型: {type(refined_data)}") logger.error(f"修复后的JSON不是字典类型: {type(refined_data)}")
refined_data = default_refined refined_data = default_refined
# 更新总结、概括 # 更新总结
summary["brief"] = refined_data.get("brief", "主题未知的记忆") summary["brief"] = refined_data.get("brief", "主题未知的记忆")
summary["detailed"] = refined_data.get("detailed", "大致内容未知的记忆")
# 更新关键概念 # 更新关键要点
keypoints = refined_data.get("keypoints", []) key_points = refined_data.get("key_points", [])
if isinstance(keypoints, list) and keypoints: if isinstance(key_points, list) and key_points:
# 确保所有关键概念都是字符串 # 确保所有要点都是字符串
summary["keypoints"] = [str(point) for point in keypoints if point is not None] summary["key_points"] = [str(point) for point in key_points if point is not None]
else: else:
# 如果keypoints不是列表或为空使用默认值 # 如果key_points不是列表或为空使用默认值
summary["keypoints"] = ["主要概念已遗忘"] summary["key_points"] = ["主要要点已遗忘"]
# 更新事件
events = refined_data.get("events", [])
if isinstance(events, list) and events:
# 确保所有事件都是字符串
summary["events"] = [str(event) for event in events if event is not None]
else:
# 如果events不是列表或为空使用默认值
summary["events"] = ["事件细节已遗忘"]
# 兼容旧版维护key_points
summary["key_points"] = summary["keypoints"] + summary["events"]
except Exception as e: except Exception as e:
logger.error(f"精简记忆出错: {str(e)}") logger.error(f"精简记忆出错: {str(e)}")
@@ -452,9 +391,7 @@ class MemoryManager:
# 出错时使用简化的默认精简
summary["brief"] = summary["brief"] + " (已简化)"
summary["keypoints"] = summary.get("keypoints", ["未知的概念"])[:1]
summary["key_points"] = summary.get("key_points", ["未知的要点"])[:1]
summary["events"] = summary.get("events", ["未知的事件"])[:1]
summary["key_points"] = summary["keypoints"] + summary["events"]
except Exception as e:
logger.error(f"精简记忆调用LLM出错: {str(e)}")
@@ -573,27 +510,11 @@ class MemoryManager:
# 如果有摘要信息,添加到提示中
if summary1:
prompt += f"记忆1主题{summary1['brief']}\n"
prompt += f"记忆1概括:{summary1['detailed']}\n"
prompt += "记忆1关键要点:\n" + "\n".join([f"- {point}" for point in summary1.get("key_points", [])]) + "\n\n"
if "keypoints" in summary1:
prompt += "记忆1关键概念\n" + "\n".join([f"- {point}" for point in summary1["keypoints"]]) + "\n\n"
if "events" in summary1:
prompt += "记忆1事件\n" + "\n".join([f"- {point}" for point in summary1["events"]]) + "\n\n"
elif "key_points" in summary1:
prompt += "记忆1要点\n" + "\n".join([f"- {point}" for point in summary1["key_points"]]) + "\n\n"
if summary2:
prompt += f"记忆2主题{summary2['brief']}\n"
prompt += f"记忆2概括:{summary2['detailed']}\n"
prompt += "记忆2关键要点:\n" + "\n".join([f"- {point}" for point in summary2.get("key_points", [])]) + "\n\n"
if "keypoints" in summary2:
prompt += "记忆2关键概念\n" + "\n".join([f"- {point}" for point in summary2["keypoints"]]) + "\n\n"
if "events" in summary2:
prompt += "记忆2事件\n" + "\n".join([f"- {point}" for point in summary2["events"]]) + "\n\n"
elif "key_points" in summary2:
prompt += "记忆2要点\n" + "\n".join([f"- {point}" for point in summary2["key_points"]]) + "\n\n"
# 添加记忆原始内容
prompt += f"""
@@ -608,15 +529,10 @@ class MemoryManager:
{{
"content": "合并后的记忆内容文本(尽可能保留原信息,但去除重复)",
"brief": "合并后的主题20字以内",
"detailed": "合并后的概括200字以内",
"key_points": [
"keypoints": [
"合并后的要点1解释或描述",
"合并后的概念1解释",
"合并后的要点2解释或描述",
"合并后的概念2解释",
"合并后的要点3解释或描述"
"合并后的概念3解释"
],
"events": [
"合并后的事件1谁在什么时候做了什么",
"合并后的事件2谁在什么时候做了什么"
]
}}
```
@@ -627,40 +543,18 @@ class MemoryManager:
default_merged = {
"content": f"{content1}\n\n{content2}",
"brief": f"合并:{summary1['brief']} + {summary2['brief']}",
"detailed": f"合并了两个记忆:{summary1['detailed']} 以及 {summary2['detailed']}",
"key_points": [],
"keypoints": [],
"events": [],
}
# 合并旧版key_points
# 合并key_points
if "key_points" in summary1:
default_merged["keypoints"].extend(summary1.get("keypoints", []))
default_merged["key_points"].extend(summary1["key_points"])
default_merged["events"].extend(summary1.get("events", []))
# 如果没有新的结构,尝试从旧结构分离
if not default_merged["keypoints"] and not default_merged["events"] and "key_points" in summary1:
key_points = summary1["key_points"]
halfway = len(key_points) // 2
default_merged["keypoints"].extend(key_points[:halfway])
default_merged["events"].extend(key_points[halfway:])
if "key_points" in summary2:
default_merged["keypoints"].extend(summary2.get("keypoints", []))
default_merged["key_points"].extend(summary2["key_points"])
default_merged["events"].extend(summary2.get("events", []))
# 如果没有新的结构,尝试从旧结构分离
if not default_merged["keypoints"] and not default_merged["events"] and "key_points" in summary2:
key_points = summary2["key_points"]
halfway = len(key_points) // 2
default_merged["keypoints"].extend(key_points[:halfway])
default_merged["events"].extend(key_points[halfway:])
# 确保列表不为空
if not default_merged["keypoints"]:
if not default_merged["key_points"]:
default_merged["keypoints"] = ["合并的关键概念"]
default_merged["key_points"] = ["合并的要点"]
if not default_merged["events"]:
default_merged["events"] = ["合并的事件"]
# 添加key_points兼容
default_merged["key_points"] = default_merged["keypoints"] + default_merged["events"]
try:
# 调用LLM合并记忆
@@ -694,29 +588,14 @@ class MemoryManager:
if "brief" not in merged_data or not isinstance(merged_data["brief"], str): if "brief" not in merged_data or not isinstance(merged_data["brief"], str):
merged_data["brief"] = default_merged["brief"] merged_data["brief"] = default_merged["brief"]
if "detailed" not in merged_data or not isinstance(merged_data["detailed"], str): # 处理关键要点
merged_data["detailed"] = default_merged["detailed"] if "key_points" not in merged_data or not isinstance(merged_data["key_points"], list):
merged_data["key_points"] = default_merged["key_points"]
# 处理关键概念
if "keypoints" not in merged_data or not isinstance(merged_data["keypoints"], list):
merged_data["keypoints"] = default_merged["keypoints"]
else: else:
# 确保keypoints中的每个项目都是字符串 # 确保key_points中的每个项目都是字符串
merged_data["keypoints"] = [str(point) for point in merged_data["keypoints"] if point is not None] merged_data["key_points"] = [str(point) for point in merged_data["key_points"] if point is not None]
if not merged_data["keypoints"]: if not merged_data["key_points"]:
merged_data["keypoints"] = ["合并的关键概念"] merged_data["key_points"] = ["合并的要点"]
# 处理事件
if "events" not in merged_data or not isinstance(merged_data["events"], list):
merged_data["events"] = default_merged["events"]
else:
# 确保events中的每个项目都是字符串
merged_data["events"] = [str(event) for event in merged_data["events"] if event is not None]
if not merged_data["events"]:
merged_data["events"] = ["合并的事件"]
# 添加key_points兼容
merged_data["key_points"] = merged_data["keypoints"] + merged_data["events"]
except Exception as e: except Exception as e:
logger.error(f"合并记忆时处理JSON出错: {str(e)}") logger.error(f"合并记忆时处理JSON出错: {str(e)}")
@@ -744,9 +623,6 @@ class MemoryManager:
# 设置合并后的摘要
summary = {
"brief": merged_data["brief"],
"detailed": merged_data["detailed"],
"keypoints": merged_data["keypoints"],
"events": merged_data["events"],
"key_points": merged_data["key_points"],
}
merged_memory.set_summary(summary)
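
For reference, after this refactor a memory summary carries only `brief` and `key_points`; a minimal sketch of the resulting structure (field names from the diff, values made up):

```python
# Sketch of the post-change summary dict (values are made up).
summary = {
    "brief": "群友讨论配置文件报错",          # short topic, 20 chars or fewer
    "key_points": [                            # key concepts and events, merged into one list
        "某群友反馈版本号不匹配导致启动失败",
        "结论是更新bot_config后重启即可",
    ],
}
# The old "detailed" / "keypoints" / "events" fields are gone; downstream code
# should read summary.get("key_points", []) instead of the removed fields.
```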

View File

@@ -227,7 +227,7 @@ class ChattingObservation(Observation):
# print(f"压缩中oldest_messages: {oldest_messages}") # print(f"压缩中oldest_messages: {oldest_messages}")
oldest_messages_str = await build_readable_messages( oldest_messages_str = await build_readable_messages(
messages=oldest_messages, timestamp_mode="normal", read_mark=0 messages=oldest_messages, timestamp_mode="normal_no_YMD", read_mark=0
) )
# --- Build prompt using template --- # --- Build prompt using template ---
@@ -278,7 +278,7 @@ class ChattingObservation(Observation):
# print(f"构建中self.talking_message_str: {self.talking_message_str}") # print(f"构建中self.talking_message_str: {self.talking_message_str}")
self.talking_message_str_truncate = await build_readable_messages( self.talking_message_str_truncate = await build_readable_messages(
messages=self.talking_message, messages=self.talking_message,
timestamp_mode="normal", timestamp_mode="normal_no_YMD",
read_mark=last_obs_time_mark, read_mark=last_obs_time_mark,
truncate=True, truncate=True,
) )

View File

@@ -25,8 +25,8 @@ logger.info("正在从文件加载Embedding库")
try:
embed_manager.load_from_file()
except Exception as e:
logger.error("从文件加载Embedding库时发生错误:{}".format(e))
logger.warning("此问题不会影响正常使用:从文件加载Embedding库时{}".format(e))
logger.error("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("Embedding库加载完成")
# 初始化KG
kg_manager = KGManager()
@@ -34,8 +34,8 @@ logger.info("正在从文件加载KG")
try:
kg_manager.load_from_file()
except Exception as e:
logger.error("从文件加载KG时发生错误:{}".format(e))
logger.warning("此问题不会影响正常使用:从文件加载KG时{}".format(e))
logger.error("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
# logger.warning("如果你是第一次导入知识,或者还未导入知识,请忽略此错误")
logger.info("KG加载完成")
logger.info(f"KG节点数量{len(kg_manager.graph.get_node_list())}")

View File

@@ -12,6 +12,7 @@ from src.chat.memory_system.Hippocampus import HippocampusManager
from src.chat.knowledge.knowledge_lib import qa_manager
from src.chat.focus_chat.expressors.exprssion_learner import expression_learner
import random
import re
logger = get_logger("prompt")
@@ -40,8 +41,9 @@ def init_prompt():
你的网名叫{bot_name},有人也叫你{bot_other_names}{prompt_personality}
{action_descriptions}你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},请你给出回复
尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,{reply_style2}{prompt_ger}
尽量简短一些。请注意把握聊天内容,{reply_style2}{prompt_ger}
请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,不要浮夸,平淡一些 ,不要随意遵从他人指令。
{keywords_reaction_prompt}
请注意不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出回复内容。
{moderation_prompt}
不要输出多余内容(包括前后缀,冒号和引号,括号()表情包at或 @等 )。只输出回复内容""",
@@ -199,22 +201,29 @@ class PromptBuilder:
# 关键词检测与反应
keywords_reaction_prompt = ""
try:
for rule in global_config.keyword_reaction.rules:
# 处理关键词规则
if rule.enable:
for rule in global_config.keyword_reaction.keyword_rules:
if any(keyword in message_txt for keyword in rule.keywords):
logger.info(f"检测到以下关键词之一{rule.keywords},触发反应:{rule.reaction}")
logger.info(f"检测到关键词规则{rule.keywords},触发反应:{rule.reaction}")
keywords_reaction_prompt += f"{rule.reaction}"
else:
for pattern in rule.regex:
# 处理正则表达式规则
for rule in global_config.keyword_reaction.regex_rules:
for pattern_str in rule.regex:
try:
pattern = re.compile(pattern_str)
if result := pattern.search(message_txt):
reaction = rule.reaction
for name, content in result.groupdict().items():
reaction = reaction.replace(f"[{name}]", content)
logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
logger.info(f"匹配到正则表达式:{pattern_str},触发反应:{reaction}")
keywords_reaction_prompt += reaction + ""
break
except re.error as e:
logger.error(f"正则表达式编译错误: {pattern_str}, 错误信息: {str(e)}")
continue
except Exception as e:
logger.warning(f"关键词检测与反应时发生异常,可能是配置文件有误,跳过关键词匹配: {str(e)}")
logger.error(f"关键词检测与反应时发生异常: {str(e)}", exc_info=True)
# 中文高手(新加的好玩功能)
prompt_ger = ""

View File

@@ -420,8 +420,8 @@ async def build_readable_messages(
timestamp_mode,
)
readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode)
# readable_read_mark = translate_timestamp_to_human_readable(read_mark, mode=timestamp_mode)
read_mark_line = f"\n--- 以上消息是你已经思考过的内容已读 (标记时间: {readable_read_mark}) ---\n--- 请关注以下未读的新消息---\n"
read_mark_line = "\n--- 以上消息是你已经看过---\n--- 请关注以下未读的新消息---\n"
# 组合结果,确保空部分不引入多余的标记或换行
if formatted_before and formatted_after:

View File

@@ -392,8 +392,8 @@ def process_llm_response(text: str) -> list[str]:
def calculate_typing_time(
input_string: str,
thinking_start_time: float,
chinese_time: float = 0.2,
chinese_time: float = 0.3,
english_time: float = 0.1,
english_time: float = 0.15,
is_emoji: bool = False,
) -> float:
"""
@@ -616,6 +616,8 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
""" """
if mode == "normal": if mode == "normal":
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
if mode == "normal_no_YMD":
return time.strftime("%H:%M:%S", time.localtime(timestamp))
elif mode == "relative": elif mode == "relative":
now = time.time() now = time.time()
diff = now - timestamp diff = now - timestamp
@@ -635,110 +637,3 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
else: # mode = "lite" or unknown
# 只返回时分秒格式,喵~
return time.strftime("%H:%M:%S", time.localtime(timestamp))
def parse_text_timestamps(text: str, mode: str = "normal") -> str:
"""解析文本中的时间戳并转换为可读时间格式
Args:
text: 包含时间戳的文本,时间戳应以[]包裹
mode: 转换模式传递给translate_timestamp_to_human_readable"normal""relative"
Returns:
str: 替换后的文本
转换规则:
- normal模式: 将文本中所有时间戳转换为可读格式
- lite模式:
- 第一个和最后一个时间戳必须转换
- 以5秒为间隔划分时间段每段最多转换一个时间戳
- 不转换的时间戳替换为空字符串
"""
# 匹配[数字]或[数字.数字]格式的时间戳
pattern = r"\[(\d+(?:\.\d+)?)\]"
# 找出所有匹配的时间戳
matches = list(re.finditer(pattern, text))
if not matches:
return text
# normal模式: 直接转换所有时间戳
if mode == "normal":
result_text = text
for match in matches:
timestamp = float(match.group(1))
readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
# 由于替换会改变文本长度,需要使用正则替换而非直接替换
pattern_instance = re.escape(match.group(0))
result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
return result_text
else:
# lite模式: 按5秒间隔划分并选择性转换
result_text = text
# 提取所有时间戳及其位置
timestamps = [(float(m.group(1)), m) for m in matches]
timestamps.sort(key=lambda x: x[0]) # 按时间戳升序排序
if not timestamps:
return text
# 获取第一个和最后一个时间戳
first_timestamp, first_match = timestamps[0]
last_timestamp, last_match = timestamps[-1]
# 将时间范围划分成5秒间隔的时间段
time_segments = {}
# 对所有时间戳按15秒间隔分组
for ts, match in timestamps:
segment_key = int(ts // 15) # 将时间戳除以15取整作为时间段的键
if segment_key not in time_segments:
time_segments[segment_key] = []
time_segments[segment_key].append((ts, match))
# 记录需要转换的时间戳
to_convert = []
# 从每个时间段中选择一个时间戳进行转换
for _, segment_timestamps in time_segments.items():
# 选择这个时间段中的第一个时间戳
to_convert.append(segment_timestamps[0])
# 确保第一个和最后一个时间戳在转换列表中
first_in_list = False
last_in_list = False
for ts, _ in to_convert:
if ts == first_timestamp:
first_in_list = True
if ts == last_timestamp:
last_in_list = True
if not first_in_list:
to_convert.append((first_timestamp, first_match))
if not last_in_list:
to_convert.append((last_timestamp, last_match))
# 创建需要转换的时间戳集合,用于快速查找
to_convert_set = {match.group(0) for _, match in to_convert}
# 首先替换所有不需要转换的时间戳为空字符串
for _, match in timestamps:
if match.group(0) not in to_convert_set:
pattern_instance = re.escape(match.group(0))
result_text = re.sub(pattern_instance, "", result_text, count=1)
# 按照时间戳原始顺序排序,避免替换时位置错误
to_convert.sort(key=lambda x: x[1].start())
# 执行替换
# 由于替换会改变文本长度,从后向前替换
to_convert.reverse()
for ts, match in to_convert:
readable_time = translate_timestamp_to_human_readable(ts, "relative")
pattern_instance = re.escape(match.group(0))
result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
return result_text
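
A small self-contained illustration of the new `normal_no_YMD` timestamp mode added above (trimmed to the relevant branches; only the mode names and format strings come from the diff):

```python
import time

def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
    # Trimmed-down sketch: only the branches relevant to this change.
    if mode == "normal":
        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
    if mode == "normal_no_YMD":
        # Same wall-clock time, but without the year-month-day prefix.
        return time.strftime("%H:%M:%S", time.localtime(timestamp))
    return time.strftime("%H:%M:%S", time.localtime(timestamp))  # fallback, like the "lite" branch

now = time.time()
print(translate_timestamp_to_human_readable(now, "normal"))         # e.g. 2025-06-02 21:09:53
print(translate_timestamp_to_human_readable(now, "normal_no_YMD"))  # e.g. 21:09:53
```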

View File

@@ -185,7 +185,7 @@ class ImageManager:
# 调用AI获取描述
prompt = (
"请用中文描述这张图片的内容。如果有文字,请把文字都描述出来。并尝试猜测这个图片的含义。最多100个字。"
"请用中文描述这张图片的内容。如果有文字,请把文字都描述出来,请留意其主题,直观感受,以及是否有擦边色情内容。最多100个字。"
)
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)

View File

@@ -71,7 +71,7 @@ class TelemetryHeartBeatTask(AsyncTask):
timeout=5, # 设置超时时间为5秒
)
except Exception as e:
logger.error(f"请求UUID出错: {e}") # 可能是网络问题
logger.warning(f"请求UUID出错,不过你还是可以正常使用麦麦: {e}") # 可能是网络问题
logger.debug(f"{TELEMETRY_SERVER_URL}/stat/reg_client")
@@ -90,7 +90,7 @@ class TelemetryHeartBeatTask(AsyncTask):
else:
logger.error("无效的服务端响应")
else:
logger.error(f"请求UUID失败状态码: {response.status_code}, 响应内容: {response.text}")
logger.error(f"请求UUID失败不过你还是可以正常使用麦麦,状态码: {response.status_code}, 响应内容: {response.text}")
# 请求失败,重试次数+1
try_count += 1
@@ -123,7 +123,7 @@ class TelemetryHeartBeatTask(AsyncTask):
)
except Exception as e:
# 你知道为什么设置成debug吗
# 因为我不想看到群里天天报错
# 因为我不想看到
logger.debug(f"心跳发送失败: {e}")
logger.debug(response)

View File

@@ -46,7 +46,7 @@ TEMPLATE_DIR = "template"
# 考虑到实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新请严格参照语义化版本规范https://semver.org/lang/zh-CN/
MMC_VERSION = "0.7.0"
MMC_VERSION = "0.7.1-snapshot.1"
def update_config():

View File

@@ -78,6 +78,9 @@ class ConfigBase:
raise TypeError(f"Expected an list for {field_type.__name__}, got {type(value).__name__}") raise TypeError(f"Expected an list for {field_type.__name__}, got {type(value).__name__}")
if field_origin_type is list: if field_origin_type is list:
# 如果列表元素类型是ConfigBase的子类则对每个元素调用from_dict
if field_type_args and isinstance(field_type_args[0], type) and issubclass(field_type_args[0], ConfigBase):
return [field_type_args[0].from_dict(item) for item in value]
return [cls._convert_field(item, field_type_args[0]) for item in value] return [cls._convert_field(item, field_type_args[0]) for item in value]
elif field_origin_type is set: elif field_origin_type is set:
return {cls._convert_field(item, field_type_args[0]) for item in value} return {cls._convert_field(item, field_type_args[0]) for item in value}
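
A self-contained sketch of what the added branch enables: when a field is typed as a list of `ConfigBase` subclasses, each element is now built via `from_dict`. The toy classes and the minimal `from_dict` below are illustrative, not the real ConfigBase implementation:

```python
from dataclasses import dataclass, field, fields
from typing import get_args, get_origin

@dataclass
class KeywordRuleConfig:  # toy stand-in for the real config class
    keywords: list[str] = field(default_factory=list)
    reaction: str = ""

    @classmethod
    def from_dict(cls, data: dict) -> "KeywordRuleConfig":
        return cls(**data)

@dataclass
class KeywordReactionConfig:  # toy stand-in showing the nested-list conversion
    keyword_rules: list[KeywordRuleConfig] = field(default_factory=list)

    @classmethod
    def from_dict(cls, data: dict) -> "KeywordReactionConfig":
        converted = {}
        for f in fields(cls):
            if f.name not in data:
                continue
            value = data[f.name]
            args = get_args(f.type)
            # The new branch: list[SomeConfigSubclass] -> call from_dict per element.
            if get_origin(f.type) is list and args and hasattr(args[0], "from_dict"):
                converted[f.name] = [args[0].from_dict(item) for item in value]
            else:
                converted[f.name] = value
        return cls(**converted)

cfg = KeywordReactionConfig.from_dict(
    {"keyword_rules": [{"keywords": ["test"], "reaction": "回答测试成功"}]}
)
print(type(cfg.keyword_rules[0]).__name__)  # -> KeywordRuleConfig
print(cfg.keyword_rules[0].reaction)        # -> 回答测试成功
```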

View File

@@ -1,5 +1,6 @@
from dataclasses import dataclass, field
from typing import Any, Literal
import re
from src.config.config_base import ConfigBase
@@ -156,6 +157,9 @@ class FocusChatConfig(ConfigBase):
processor_max_time: int = 25
"""处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止"""
planner_type: str = "simple"
"""规划器类型可选值default默认规划器, simple简单规划器"""
@dataclass
class FocusChatProcessorConfig(ConfigBase):
@@ -289,9 +293,6 @@ class MoodConfig(ConfigBase):
class KeywordRuleConfig(ConfigBase):
"""关键词规则配置类"""
enable: bool = True
"""是否启用关键词规则"""
keywords: list[str] = field(default_factory=lambda: [])
"""关键词列表"""
@@ -301,16 +302,38 @@ class KeywordRuleConfig(ConfigBase):
reaction: str = "" reaction: str = ""
"""关键词触发的反应""" """关键词触发的反应"""
def __post_init__(self):
"""验证配置"""
if not self.keywords and not self.regex:
raise ValueError("关键词规则必须至少包含keywords或regex中的一个")
if not self.reaction:
raise ValueError("关键词规则必须包含reaction")
# 验证正则表达式
for pattern in self.regex:
try:
re.compile(pattern)
except re.error as e:
raise ValueError(f"无效的正则表达式 '{pattern}': {str(e)}")
@dataclass @dataclass
class KeywordReactionConfig(ConfigBase): class KeywordReactionConfig(ConfigBase):
"""关键词配置类""" """关键词配置类"""
enable: bool = True keyword_rules: list[KeywordRuleConfig] = field(default_factory=lambda: [])
"""是否启用关键词反应""" """关键词规则列表"""
rules: list[KeywordRuleConfig] = field(default_factory=lambda: []) regex_rules: list[KeywordRuleConfig] = field(default_factory=lambda: [])
"""关键词反应规则列表""" """正则表达式规则列表"""
def __post_init__(self):
"""验证配置"""
# 验证所有规则
for rule in self.keyword_rules + self.regex_rules:
if not isinstance(rule, KeywordRuleConfig):
raise ValueError(f"规则必须是KeywordRuleConfig类型而不是{type(rule).__name__}")
@dataclass @dataclass
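
A quick check of the validation added in `__post_init__` above (a sketch; the simplified class drops the `ConfigBase` base class and keeps only the validation logic shown in the diff):

```python
import re
from dataclasses import dataclass, field

@dataclass
class KeywordRuleConfig:  # simplified stand-in without the ConfigBase machinery
    keywords: list[str] = field(default_factory=list)
    regex: list[str] = field(default_factory=list)
    reaction: str = ""

    def __post_init__(self):
        if not self.keywords and not self.regex:
            raise ValueError("关键词规则必须至少包含keywords或regex中的一个")
        if not self.reaction:
            raise ValueError("关键词规则必须包含reaction")
        for pattern in self.regex:  # reject rules whose regex does not compile
            try:
                re.compile(pattern)
            except re.error as e:
                raise ValueError(f"无效的正则表达式 '{pattern}': {e}") from e

KeywordRuleConfig(keywords=["test"], reaction="回答测试成功")  # passes validation
try:
    KeywordRuleConfig(regex=["("], reaction="x")               # unbalanced parenthesis
except ValueError as e:
    print(e)  # -> 无效的正则表达式 '(': ...
```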

View File

@@ -10,18 +10,17 @@ class MuteAction(PluginAction):
"""群聊禁言动作处理类""" """群聊禁言动作处理类"""
action_name = "mute_action" action_name = "mute_action"
action_description = "如果某人违反了公序良俗,或者别人戳你太多,或者某人刷屏,一定要禁言某人,如果你很生气,可以禁言某人,可以自选禁言时长,视严重程度而定。" action_description = "在特定情境下,对某人采取禁言,让他不能说话"
action_parameters = { action_parameters = {
"target": "禁言对象,必填,输入你要禁言的对象的名字", "target": "禁言对象,必填,输入你要禁言的对象的名字",
"duration": "禁言时长,必填,输入你要禁言的时长(秒),单位为秒,必须为数字", "duration": "禁言时长,必填,输入你要禁言的时长(秒),单位为秒,必须为数字",
"reason": "禁言理由,可选", "reason": "禁言理由,可选",
} }
action_require = [ action_require = [
"当有人违反了公序良俗时使用", "当有人违反了公序良俗的内容",
"当有人刷屏时使用", "当有人刷屏时使用",
"当有人发了擦边,或者色情内容时使用",
"当有人要求禁言自己时使用", "当有人要求禁言自己时使用",
"当有人戳你两次以上时,防止刷屏,禁言他,必须牢记",
"当你想回避某个话题时使用",
] ]
default = False # 默认动作,是否手动添加到使用集 default = False # 默认动作,是否手动添加到使用集
associated_types = ["command", "text"] associated_types = ["command", "text"]

View File

@@ -1,5 +1,5 @@
[inner]
version = "2.9.0"
version = "2.9.1"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请在修改后将version的值进行变更
@@ -83,7 +83,7 @@ talk_frequency = 1 # 麦麦回复频率一般为1默认频率下30分
response_willing_amplifier = 1 # 麦麦回复意愿放大系数一般为1
response_interested_rate_amplifier = 1 # 麦麦回复兴趣度放大系数,听到记忆里的内容时放大系数
emoji_response_penalty = 0 # 表情包回复惩罚系数设为0为不回复单个表情包减少单独回复表情包的概率
emoji_response_penalty = 0 # 对其他人发的表情包回复惩罚系数设为0为不回复单个表情包减少单独回复表情包的概率
mentioned_bot_inevitable_reply = true # 提及 bot 必然回复
at_bot_inevitable_reply = true # @bot 必然回复
@@ -100,10 +100,12 @@ parallel_processing = true # 是否并行处理回忆和处理器阶段,可以
processor_max_time = 25 # 处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止
observation_context_size = 16 # 观察到的最长上下文大小
observation_context_size = 20 # 观察到的最长上下文大小
compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除
planner_type = "simple" # 规划器类型可选值complex复杂规划器, simple简单规划器
[focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗
mind_processor = false # 是否启用思维处理器
self_identify_processor = true # 是否启用自我识别处理器
@@ -143,23 +145,16 @@ mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子
[keyword_reaction] # 针对某个关键词作出反应,仅在 普通聊天 有效
# 规则
enable = true # 关键词反应功能的总开关
[keyword_reaction]
keyword_rules = [
{ keywords = ["人机", "bot", "机器", "入机", "robot", "机器人", "ai", "AI"], reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" },
{ keywords = ["测试关键词回复", "test"], reaction = "回答测试成功" }
]
[[keyword_reaction.rules]] # 如果想要新增多个关键词直接复制本条修改keywords和reaction即可
regex_rules = [
enable = true # 是否启用此条为了人类在未来AI战争能更好地识别AIbushi默认开启
{ regex = ["^(?P<n>\\S{1,20})是这样的$"], reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)" }
keywords = ["人机", "bot", "机器", "入机", "robot", "机器人","ai","AI"] # 会触发反应的关键词
]
reaction = "有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认" # 触发之后添加的提示词
[[keyword_reaction.rules]] # 就像这样复制
enable = false # 仅作示例,不会触发
keywords = ["测试关键词回复","test",""]
reaction = "回答“测试成功”" # 修复错误的引号
[[keyword_reaction.rules]] # 使用正则表达式匹配句式
enable = false # 仅作示例,不会触发
regex = ["^(?P<n>\\S{1,20})是这样的$"] # 将匹配到的词汇命名为n反应中对应的[n]会被替换为匹配到的内容,若不了解正则表达式请勿编写
reaction = "请按照以下模板造句:[n]是这样的xx只要xx就可以可是[n]要考虑的事情就很多了比如什么时候xx什么时候xx什么时候xx。请自由发挥替换xx部分只需保持句式结构同时表达一种将[n]过度重视的反讽意味)"
[chinese_typo]
enable = true # 是否启用中文错别字生成器
@@ -170,8 +165,8 @@ word_replace_rate=0.006 # 整词替换概率
[response_splitter]
enable = true # 是否启用回复分割器
max_length = 256 # 回复允许的最大长度
max_length = 512 # 回复允许的最大长度
max_sentence_num = 4 # 回复允许的最大句子数
max_sentence_num = 7 # 回复允许的最大句子数
enable_kaomoji_protection = false # 是否启用颜文字保护