Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
This commit is contained in:
@@ -80,14 +80,16 @@ class ExpressionSelector:
|
||||
)
|
||||
|
||||
def get_random_expressions(
|
||||
self, chat_id: str, style_num: int, grammar_num: int, personality_num: int
|
||||
self, chat_id: str, total_num: int, style_percentage: float, grammar_percentage: float
|
||||
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
|
||||
(
|
||||
learnt_style_expressions,
|
||||
learnt_grammar_expressions,
|
||||
personality_expressions,
|
||||
) = self.expression_learner.get_expression_by_chat_id(chat_id)
|
||||
|
||||
style_num = int(total_num * style_percentage)
|
||||
grammar_num = int(total_num * grammar_percentage)
|
||||
|
||||
# 按权重抽样(使用count作为权重)
|
||||
if learnt_style_expressions:
|
||||
style_weights = [expr.get("count", 1) for expr in learnt_style_expressions]
|
||||
@@ -101,13 +103,7 @@ class ExpressionSelector:
|
||||
else:
|
||||
selected_grammar = []
|
||||
|
||||
if personality_expressions:
|
||||
personality_weights = [expr.get("count", 1) for expr in personality_expressions]
|
||||
selected_personality = weighted_sample(personality_expressions, personality_weights, personality_num)
|
||||
else:
|
||||
selected_personality = []
|
||||
|
||||
return selected_style, selected_grammar, selected_personality
|
||||
return selected_style, selected_grammar
|
||||
|
||||
def update_expressions_count_batch(self, expressions_to_update: List[Dict[str, str]], increment: float = 0.1):
|
||||
"""对一批表达方式更新count值,按文件分组后一次性写入"""
|
||||
@@ -174,7 +170,7 @@ class ExpressionSelector:
|
||||
"""使用LLM选择适合的表达方式"""
|
||||
|
||||
# 1. 获取35个随机表达方式(现在按权重抽取)
|
||||
style_exprs, grammar_exprs, personality_exprs = self.get_random_expressions(chat_id, 25, 25, 10)
|
||||
style_exprs, grammar_exprs = self.get_random_expressions(chat_id, 50, 0.5, 0.5)
|
||||
|
||||
# 2. 构建所有表达方式的索引和情境列表
|
||||
all_expressions = []
|
||||
@@ -196,14 +192,6 @@ class ExpressionSelector:
|
||||
all_expressions.append(expr_with_type)
|
||||
all_situations.append(f"{len(all_expressions)}.{expr['situation']}")
|
||||
|
||||
# 添加personality表达方式
|
||||
for expr in personality_exprs:
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
expr_with_type = expr.copy()
|
||||
expr_with_type["type"] = "style_personality"
|
||||
all_expressions.append(expr_with_type)
|
||||
all_situations.append(f"{len(all_expressions)}.{expr['situation']}")
|
||||
|
||||
if not all_expressions:
|
||||
logger.warning("没有找到可用的表达方式")
|
||||
return []
|
||||
@@ -260,7 +248,7 @@ class ExpressionSelector:
|
||||
|
||||
# 对选中的所有表达方式,一次性更新count数
|
||||
if valid_expressions:
|
||||
self.update_expressions_count_batch(valid_expressions, 0.003)
|
||||
self.update_expressions_count_batch(valid_expressions, 0.006)
|
||||
|
||||
# logger.info(f"LLM从{len(all_expressions)}个情境中选择了{len(valid_expressions)}个")
|
||||
return valid_expressions
|
||||
|
||||
@@ -74,16 +74,13 @@ class ExpressionLearner:
|
||||
)
|
||||
self.llm_model = None
|
||||
|
||||
def get_expression_by_chat_id(
|
||||
self, chat_id: str
|
||||
) -> Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
|
||||
def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
|
||||
"""
|
||||
获取指定chat_id的style和grammar表达方式, 同时获取全局的personality表达方式
|
||||
获取指定chat_id的style和grammar表达方式
|
||||
返回的每个表达方式字典中都包含了source_id, 用于后续的更新操作
|
||||
"""
|
||||
learnt_style_expressions = []
|
||||
learnt_grammar_expressions = []
|
||||
personality_expressions = []
|
||||
|
||||
# 获取style表达方式
|
||||
style_dir = os.path.join("data", "expression", "learnt_style", str(chat_id))
|
||||
@@ -111,19 +108,7 @@ class ExpressionLearner:
|
||||
except Exception as e:
|
||||
logger.error(f"读取grammar表达方式失败: {e}")
|
||||
|
||||
# 获取personality表达方式
|
||||
personality_file = os.path.join("data", "expression", "personality", "expressions.json")
|
||||
if os.path.exists(personality_file):
|
||||
try:
|
||||
with open(personality_file, "r", encoding="utf-8") as f:
|
||||
expressions = json.load(f)
|
||||
for expr in expressions:
|
||||
expr["source_id"] = "personality" # 添加来源ID
|
||||
personality_expressions.append(expr)
|
||||
except Exception as e:
|
||||
logger.error(f"读取personality表达方式失败: {e}")
|
||||
|
||||
return learnt_style_expressions, learnt_grammar_expressions, personality_expressions
|
||||
return learnt_style_expressions, learnt_grammar_expressions
|
||||
|
||||
def is_similar(self, s1: str, s2: str) -> bool:
|
||||
"""
|
||||
@@ -428,6 +413,7 @@ class ExpressionLearner:
|
||||
|
||||
init_prompt()
|
||||
|
||||
|
||||
expression_learner = None
|
||||
|
||||
|
||||
|
||||
@@ -25,7 +25,6 @@ class CycleDetail:
|
||||
self.loop_processor_info: Dict[str, Any] = {} # 前处理器信息
|
||||
self.loop_plan_info: Dict[str, Any] = {}
|
||||
self.loop_action_info: Dict[str, Any] = {}
|
||||
self.loop_post_processor_info: Dict[str, Any] = {} # 后处理器信息
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""将循环信息转换为字典格式"""
|
||||
@@ -80,7 +79,6 @@ class CycleDetail:
|
||||
"loop_processor_info": convert_to_serializable(self.loop_processor_info),
|
||||
"loop_plan_info": convert_to_serializable(self.loop_plan_info),
|
||||
"loop_action_info": convert_to_serializable(self.loop_action_info),
|
||||
"loop_post_processor_info": convert_to_serializable(self.loop_post_processor_info),
|
||||
}
|
||||
|
||||
def complete_cycle(self):
|
||||
@@ -135,4 +133,3 @@ class CycleDetail:
|
||||
self.loop_processor_info = loop_info["loop_processor_info"]
|
||||
self.loop_plan_info = loop_info["loop_plan_info"]
|
||||
self.loop_action_info = loop_info["loop_action_info"]
|
||||
self.loop_post_processor_info = loop_info["loop_post_processor_info"]
|
||||
|
||||
@@ -17,9 +17,8 @@ from src.chat.focus_chat.info_processors.working_memory_processor import Working
|
||||
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
|
||||
from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.chat.heart_flow.observation.structure_observation import StructureObservation
|
||||
from src.chat.heart_flow.observation.actions_observation import ActionObservation
|
||||
from src.chat.focus_chat.info_processors.tool_processor import ToolProcessor
|
||||
|
||||
from src.chat.focus_chat.memory_activator import MemoryActivator
|
||||
from src.chat.focus_chat.info_processors.base_processor import BaseProcessor
|
||||
from src.chat.focus_chat.planners.planner_factory import PlannerFactory
|
||||
@@ -28,21 +27,18 @@ from src.chat.focus_chat.planners.action_manager import ActionManager
|
||||
from src.config.config import global_config
|
||||
from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
|
||||
from src.chat.focus_chat.hfc_version_manager import get_hfc_version
|
||||
from src.chat.focus_chat.info.structured_info import StructuredInfo
|
||||
from src.person_info.relationship_builder_manager import relationship_builder_manager
|
||||
|
||||
|
||||
install(extra_lines=3)
|
||||
|
||||
# 超时常量配置
|
||||
ACTION_MODIFICATION_TIMEOUT = 15.0 # 动作修改任务超时时限(秒)
|
||||
# 注释:原来的动作修改超时常量已移除,因为改为顺序执行
|
||||
|
||||
# 定义观察器映射:键是观察器名称,值是 (观察器类, 初始化参数)
|
||||
OBSERVATION_CLASSES = {
|
||||
"ChattingObservation": (ChattingObservation, "chat_id"),
|
||||
"WorkingMemoryObservation": (WorkingMemoryObservation, "observe_id"),
|
||||
"HFCloopObservation": (HFCloopObservation, "observe_id"),
|
||||
"StructureObservation": (StructureObservation, "observe_id"),
|
||||
}
|
||||
|
||||
# 定义处理器映射:键是处理器名称,值是 (处理器类, 可选的配置键名)
|
||||
@@ -51,11 +47,6 @@ PROCESSOR_CLASSES = {
|
||||
"WorkingMemoryProcessor": (WorkingMemoryProcessor, "working_memory_processor"),
|
||||
}
|
||||
|
||||
# 定义后期处理器映射:在规划后、动作执行前运行的处理器
|
||||
POST_PLANNING_PROCESSOR_CLASSES = {
|
||||
"ToolProcessor": (ToolProcessor, "tool_use_processor"),
|
||||
}
|
||||
|
||||
logger = get_logger("hfc") # Logger Name Changed
|
||||
|
||||
|
||||
@@ -120,31 +111,13 @@ class HeartFChatting:
|
||||
self._register_observations()
|
||||
|
||||
# 根据配置文件和默认规则确定启用的处理器
|
||||
config_processor_settings = global_config.focus_chat_processor
|
||||
self.enabled_processor_names = []
|
||||
|
||||
for proc_name, (_proc_class, config_key) in PROCESSOR_CLASSES.items():
|
||||
# 检查处理器是否应该启用
|
||||
if not config_key or getattr(config_processor_settings, config_key, True):
|
||||
self.enabled_processor_names.append(proc_name)
|
||||
|
||||
# 初始化后期处理器(规划后执行的处理器)
|
||||
self.enabled_post_planning_processor_names = []
|
||||
for proc_name, (_proc_class, config_key) in POST_PLANNING_PROCESSOR_CLASSES.items():
|
||||
# 对于关系相关处理器,需要同时检查关系配置项
|
||||
if not config_key or getattr(config_processor_settings, config_key, True):
|
||||
self.enabled_post_planning_processor_names.append(proc_name)
|
||||
|
||||
# logger.info(f"{self.log_prefix} 将启用的处理器: {self.enabled_processor_names}")
|
||||
# logger.info(f"{self.log_prefix} 将启用的后期处理器: {self.enabled_post_planning_processor_names}")
|
||||
self.enabled_processor_names = ["ChattingInfoProcessor"]
|
||||
if global_config.focus_chat.working_memory_processor:
|
||||
self.enabled_processor_names.append("WorkingMemoryProcessor")
|
||||
|
||||
self.processors: List[BaseProcessor] = []
|
||||
self._register_default_processors()
|
||||
|
||||
# 初始化后期处理器
|
||||
self.post_planning_processors: List[BaseProcessor] = []
|
||||
self._register_post_planning_processors()
|
||||
|
||||
self.action_manager = ActionManager()
|
||||
self.action_planner = PlannerFactory.create_planner(
|
||||
log_prefix=self.log_prefix, action_manager=self.action_manager
|
||||
@@ -186,7 +159,7 @@ class HeartFChatting:
|
||||
# 检查是否需要跳过WorkingMemoryObservation
|
||||
if name == "WorkingMemoryObservation":
|
||||
# 如果工作记忆处理器被禁用,则跳过WorkingMemoryObservation
|
||||
if not global_config.focus_chat_processor.working_memory_processor:
|
||||
if not global_config.focus_chat.working_memory_processor:
|
||||
logger.debug(f"{self.log_prefix} 工作记忆处理器已禁用,跳过注册观察器 {name}")
|
||||
continue
|
||||
|
||||
@@ -211,16 +184,12 @@ class HeartFChatting:
|
||||
processor_info = PROCESSOR_CLASSES.get(name) # processor_info is (ProcessorClass, config_key)
|
||||
if processor_info:
|
||||
processor_actual_class = processor_info[0] # 获取实际的类定义
|
||||
# 根据处理器类名判断是否需要 subheartflow_id
|
||||
if name in [
|
||||
"WorkingMemoryProcessor",
|
||||
]:
|
||||
self.processors.append(processor_actual_class(subheartflow_id=self.stream_id))
|
||||
elif name == "ChattingInfoProcessor":
|
||||
# 根据处理器类名判断构造参数
|
||||
if name == "ChattingInfoProcessor":
|
||||
self.processors.append(processor_actual_class())
|
||||
elif name == "WorkingMemoryProcessor":
|
||||
self.processors.append(processor_actual_class(subheartflow_id=self.stream_id))
|
||||
else:
|
||||
# 对于PROCESSOR_CLASSES中定义但此处未明确处理构造的处理器
|
||||
# (例如, 新增了一个处理器到PROCESSOR_CLASSES, 它不需要id, 也不叫ChattingInfoProcessor)
|
||||
try:
|
||||
self.processors.append(processor_actual_class()) # 尝试无参构造
|
||||
logger.debug(f"{self.log_prefix} 注册处理器 {name} (尝试无参构造).")
|
||||
@@ -229,7 +198,6 @@ class HeartFChatting:
|
||||
f"{self.log_prefix} 处理器 {name} 构造失败。它可能需要参数(如 subheartflow_id)但未在注册逻辑中明确处理。"
|
||||
)
|
||||
else:
|
||||
# 这理论上不应该发生,因为 enabled_processor_names 是从 PROCESSOR_CLASSES 的键生成的
|
||||
logger.warning(
|
||||
f"{self.log_prefix} 在 PROCESSOR_CLASSES 中未找到名为 '{name}' 的处理器定义,将跳过注册。"
|
||||
)
|
||||
@@ -239,47 +207,6 @@ class HeartFChatting:
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 没有注册任何处理器。这可能是由于配置错误或所有处理器都被禁用了。")
|
||||
|
||||
def _register_post_planning_processors(self):
|
||||
"""根据 self.enabled_post_planning_processor_names 注册后期处理器"""
|
||||
self.post_planning_processors = [] # 清空已有的
|
||||
|
||||
for name in self.enabled_post_planning_processor_names: # 'name' is "PersonImpressionpProcessor", etc.
|
||||
processor_info = POST_PLANNING_PROCESSOR_CLASSES.get(name) # processor_info is (ProcessorClass, config_key)
|
||||
if processor_info:
|
||||
processor_actual_class = processor_info[0] # 获取实际的类定义
|
||||
# 根据处理器类名判断是否需要 subheartflow_id
|
||||
if name in [
|
||||
"ToolProcessor",
|
||||
"RelationshipBuildProcessor",
|
||||
"RealTimeInfoProcessor",
|
||||
"ExpressionSelectorProcessor",
|
||||
]:
|
||||
self.post_planning_processors.append(processor_actual_class(subheartflow_id=self.stream_id))
|
||||
else:
|
||||
# 对于POST_PLANNING_PROCESSOR_CLASSES中定义但此处未明确处理构造的处理器
|
||||
# (例如, 新增了一个处理器到POST_PLANNING_PROCESSOR_CLASSES, 它不需要id, 也不叫PersonImpressionpProcessor)
|
||||
try:
|
||||
self.post_planning_processors.append(processor_actual_class()) # 尝试无参构造
|
||||
logger.debug(f"{self.log_prefix} 注册后期处理器 {name} (尝试无参构造).")
|
||||
except TypeError:
|
||||
logger.error(
|
||||
f"{self.log_prefix} 后期处理器 {name} 构造失败。它可能需要参数(如 subheartflow_id)但未在注册逻辑中明确处理。"
|
||||
)
|
||||
else:
|
||||
# 这理论上不应该发生,因为 enabled_post_planning_processor_names 是从 POST_PLANNING_PROCESSOR_CLASSES 的键生成的
|
||||
logger.warning(
|
||||
f"{self.log_prefix} 在 POST_PLANNING_PROCESSOR_CLASSES 中未找到名为 '{name}' 的处理器定义,将跳过注册。"
|
||||
)
|
||||
|
||||
if self.post_planning_processors:
|
||||
logger.info(
|
||||
f"{self.log_prefix} 已注册后期处理器: {[p.__class__.__name__ for p in self.post_planning_processors]}"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"{self.log_prefix} 没有注册任何后期处理器。这可能是由于配置错误或所有后期处理器都被禁用了。"
|
||||
)
|
||||
|
||||
async def start(self):
|
||||
"""检查是否需要启动主循环,如果未激活则启动。"""
|
||||
logger.debug(f"{self.log_prefix} 开始启动 HeartFChatting")
|
||||
@@ -460,27 +387,12 @@ class HeartFChatting:
|
||||
("\n前处理器耗时: " + "; ".join(processor_time_strings)) if processor_time_strings else ""
|
||||
)
|
||||
|
||||
# 新增:输出每个后处理器的耗时
|
||||
post_processor_time_costs = self._current_cycle_detail.loop_post_processor_info.get(
|
||||
"post_processor_time_costs", {}
|
||||
)
|
||||
post_processor_time_strings = []
|
||||
for pname, ptime in post_processor_time_costs.items():
|
||||
formatted_ptime = f"{ptime * 1000:.2f}毫秒" if ptime < 1 else f"{ptime:.2f}秒"
|
||||
post_processor_time_strings.append(f"{pname}: {formatted_ptime}")
|
||||
post_processor_time_log = (
|
||||
("\n后处理器耗时: " + "; ".join(post_processor_time_strings))
|
||||
if post_processor_time_strings
|
||||
else ""
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考,"
|
||||
f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, "
|
||||
f"动作: {self._current_cycle_detail.loop_plan_info.get('action_result', {}).get('action_type', '未知动作')}"
|
||||
+ (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
|
||||
+ processor_time_log
|
||||
+ post_processor_time_log
|
||||
)
|
||||
|
||||
# 记录性能数据
|
||||
@@ -491,8 +403,7 @@ class HeartFChatting:
|
||||
"action_type": action_result.get("action_type", "unknown"),
|
||||
"total_time": self._current_cycle_detail.end_time - self._current_cycle_detail.start_time,
|
||||
"step_times": cycle_timers.copy(),
|
||||
"processor_time_costs": processor_time_costs, # 前处理器时间
|
||||
"post_processor_time_costs": post_processor_time_costs, # 后处理器时间
|
||||
"processor_time_costs": processor_time_costs, # 处理器时间
|
||||
"reasoning": action_result.get("reasoning", ""),
|
||||
"success": self._current_cycle_detail.loop_action_info.get("action_taken", False),
|
||||
}
|
||||
@@ -634,123 +545,6 @@ class HeartFChatting:
|
||||
|
||||
return all_plan_info, processor_time_costs
|
||||
|
||||
async def _process_post_planning_processors_with_timing(
|
||||
self, observations: List[Observation], action_type: str, action_data: dict
|
||||
) -> tuple[dict, dict]:
|
||||
"""
|
||||
处理后期处理器(规划后执行的处理器)并收集详细时间统计
|
||||
包括:关系处理器、表达选择器、记忆激活器
|
||||
|
||||
参数:
|
||||
observations: 观察器列表
|
||||
action_type: 动作类型
|
||||
action_data: 原始动作数据
|
||||
|
||||
返回:
|
||||
tuple[dict, dict]: (更新后的动作数据, 后处理器时间统计)
|
||||
"""
|
||||
logger.info(f"{self.log_prefix} 开始执行后期处理器(带详细统计)")
|
||||
|
||||
# 创建所有后期任务
|
||||
task_list = []
|
||||
task_to_name_map = {}
|
||||
task_start_times = {}
|
||||
post_processor_time_costs = {}
|
||||
|
||||
# 添加后期处理器任务
|
||||
for processor in self.post_planning_processors:
|
||||
processor_name = processor.__class__.__name__
|
||||
|
||||
async def run_processor_with_timeout_and_timing(proc=processor, name=processor_name):
|
||||
start_time = time.time()
|
||||
try:
|
||||
result = await asyncio.wait_for(
|
||||
proc.process_info(observations=observations, action_type=action_type, action_data=action_data),
|
||||
30,
|
||||
)
|
||||
end_time = time.time()
|
||||
post_processor_time_costs[name] = end_time - start_time
|
||||
logger.debug(f"{self.log_prefix} 后期处理器 {name} 耗时: {end_time - start_time:.3f}秒")
|
||||
return result
|
||||
except Exception as e:
|
||||
end_time = time.time()
|
||||
post_processor_time_costs[name] = end_time - start_time
|
||||
logger.warning(f"{self.log_prefix} 后期处理器 {name} 执行异常,耗时: {end_time - start_time:.3f}秒")
|
||||
raise e
|
||||
|
||||
task = asyncio.create_task(run_processor_with_timeout_and_timing())
|
||||
task_list.append(task)
|
||||
task_to_name_map[task] = ("processor", processor_name)
|
||||
task_start_times[task] = time.time()
|
||||
logger.info(f"{self.log_prefix} 启动后期处理器任务: {processor_name}")
|
||||
|
||||
# 如果没有任何后期任务,直接返回
|
||||
if not task_list:
|
||||
logger.info(f"{self.log_prefix} 没有启用的后期处理器或记忆激活器")
|
||||
return action_data, {}
|
||||
|
||||
# 等待所有任务完成
|
||||
pending_tasks = set(task_list)
|
||||
all_post_plan_info = []
|
||||
|
||||
while pending_tasks:
|
||||
done, pending_tasks = await asyncio.wait(pending_tasks, return_when=asyncio.FIRST_COMPLETED)
|
||||
|
||||
for task in done:
|
||||
task_type, task_name = task_to_name_map[task]
|
||||
|
||||
try:
|
||||
result = await task
|
||||
|
||||
if task_type == "processor":
|
||||
logger.info(f"{self.log_prefix} 后期处理器 {task_name} 已完成!")
|
||||
if result is not None:
|
||||
all_post_plan_info.extend(result)
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 后期处理器 {task_name} 返回了 None")
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
# 对于超时任务,记录已用时间
|
||||
elapsed_time = time.time() - task_start_times[task]
|
||||
if task_type == "processor":
|
||||
post_processor_time_costs[task_name] = elapsed_time
|
||||
logger.warning(
|
||||
f"{self.log_prefix} 后期处理器 {task_name} 超时(>30s),已跳过,耗时: {elapsed_time:.3f}秒"
|
||||
)
|
||||
except Exception as e:
|
||||
# 对于异常任务,记录已用时间
|
||||
elapsed_time = time.time() - task_start_times[task]
|
||||
if task_type == "processor":
|
||||
post_processor_time_costs[task_name] = elapsed_time
|
||||
logger.error(
|
||||
f"{self.log_prefix} 后期处理器 {task_name} 执行失败,耗时: {elapsed_time:.3f}秒. 错误: {e}",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
# 将后期处理器的结果整合到 action_data 中
|
||||
updated_action_data = action_data.copy()
|
||||
|
||||
structured_info = ""
|
||||
|
||||
for info in all_post_plan_info:
|
||||
if isinstance(info, StructuredInfo):
|
||||
structured_info = info.get_processed_info()
|
||||
|
||||
if structured_info:
|
||||
updated_action_data["structured_info"] = structured_info
|
||||
|
||||
if all_post_plan_info:
|
||||
logger.info(f"{self.log_prefix} 后期处理完成,产生了 {len(all_post_plan_info)} 个信息项")
|
||||
|
||||
# 输出详细统计信息
|
||||
if post_processor_time_costs:
|
||||
stats_str = ", ".join(
|
||||
[f"{name}: {time_cost:.3f}s" for name, time_cost in post_processor_time_costs.items()]
|
||||
)
|
||||
logger.info(f"{self.log_prefix} 后期处理器详细耗时统计: {stats_str}")
|
||||
|
||||
return updated_action_data, post_processor_time_costs
|
||||
|
||||
async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
|
||||
try:
|
||||
loop_start_time = time.time()
|
||||
@@ -765,10 +559,10 @@ class HeartFChatting:
|
||||
|
||||
await self.relationship_builder.build_relation()
|
||||
|
||||
# 并行执行调整动作、回忆和处理器阶段
|
||||
with Timer("调整动作、处理", cycle_timers):
|
||||
# 创建并行任务
|
||||
async def modify_actions_task():
|
||||
# 顺序执行调整动作和处理器阶段
|
||||
# 第一步:动作修改
|
||||
with Timer("动作修改", cycle_timers):
|
||||
try:
|
||||
# 调用完整的动作修改流程
|
||||
await self.action_modifier.modify_actions(
|
||||
observations=self.observations,
|
||||
@@ -776,44 +570,17 @@ class HeartFChatting:
|
||||
|
||||
await self.action_observation.observe()
|
||||
self.observations.append(self.action_observation)
|
||||
return True
|
||||
|
||||
# 创建两个并行任务,为LLM调用添加超时保护
|
||||
action_modify_task = asyncio.create_task(
|
||||
asyncio.wait_for(modify_actions_task(), timeout=ACTION_MODIFICATION_TIMEOUT)
|
||||
)
|
||||
processor_task = asyncio.create_task(self._process_processors(self.observations))
|
||||
|
||||
# 等待两个任务完成,使用超时保护和详细错误处理
|
||||
action_modify_result = None
|
||||
all_plan_info = []
|
||||
processor_time_costs = {}
|
||||
|
||||
try:
|
||||
action_modify_result, (all_plan_info, processor_time_costs) = await asyncio.gather(
|
||||
action_modify_task, processor_task, return_exceptions=True
|
||||
)
|
||||
|
||||
# 检查各个任务的结果
|
||||
if isinstance(action_modify_result, Exception):
|
||||
if isinstance(action_modify_result, asyncio.TimeoutError):
|
||||
logger.error(f"{self.log_prefix} 动作修改任务超时")
|
||||
else:
|
||||
logger.error(f"{self.log_prefix} 动作修改任务失败: {action_modify_result}")
|
||||
|
||||
processor_result = (all_plan_info, processor_time_costs)
|
||||
if isinstance(processor_result, Exception):
|
||||
if isinstance(processor_result, asyncio.TimeoutError):
|
||||
logger.error(f"{self.log_prefix} 处理器任务超时")
|
||||
else:
|
||||
logger.error(f"{self.log_prefix} 处理器任务失败: {processor_result}")
|
||||
all_plan_info = []
|
||||
processor_time_costs = {}
|
||||
else:
|
||||
all_plan_info, processor_time_costs = processor_result
|
||||
|
||||
logger.debug(f"{self.log_prefix} 动作修改完成")
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 并行任务gather失败: {e}")
|
||||
logger.error(f"{self.log_prefix} 动作修改失败: {e}")
|
||||
# 继续执行,不中断流程
|
||||
|
||||
# 第二步:信息处理器
|
||||
with Timer("信息处理器", cycle_timers):
|
||||
try:
|
||||
all_plan_info, processor_time_costs = await self._process_processors(self.observations)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 信息处理器失败: {e}")
|
||||
# 设置默认值以继续执行
|
||||
all_plan_info = []
|
||||
processor_time_costs = {}
|
||||
@@ -833,7 +600,6 @@ class HeartFChatting:
|
||||
"observed_messages": plan_result.get("observed_messages", ""),
|
||||
}
|
||||
|
||||
# 修正:将后期处理器从执行动作Timer中分离出来
|
||||
action_type, action_data, reasoning = (
|
||||
plan_result.get("action_result", {}).get("action_type", "error"),
|
||||
plan_result.get("action_result", {}).get("action_data", {}),
|
||||
@@ -849,22 +615,7 @@ class HeartFChatting:
|
||||
|
||||
logger.debug(f"{self.log_prefix} 麦麦想要:'{action_str}'")
|
||||
|
||||
# 添加:单独计时后期处理器,并收集详细统计
|
||||
post_processor_time_costs = {}
|
||||
if action_type != "no_reply":
|
||||
with Timer("后期处理器", cycle_timers):
|
||||
logger.debug(f"{self.log_prefix} 执行后期处理器(动作类型: {action_type})")
|
||||
# 记录详细的后处理器时间
|
||||
post_start_time = time.time()
|
||||
action_data, post_processor_time_costs = await self._process_post_planning_processors_with_timing(
|
||||
self.observations, action_type, action_data
|
||||
)
|
||||
post_end_time = time.time()
|
||||
logger.info(f"{self.log_prefix} 后期处理器总耗时: {post_end_time - post_start_time:.3f}秒")
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix} 跳过后期处理器(动作类型: {action_type})")
|
||||
|
||||
# 修正:纯动作执行计时
|
||||
# 动作执行计时
|
||||
with Timer("动作执行", cycle_timers):
|
||||
success, reply_text, command = await self._handle_action(
|
||||
action_type, reasoning, action_data, cycle_timers, thinking_id
|
||||
@@ -877,17 +628,11 @@ class HeartFChatting:
|
||||
"taken_time": time.time(),
|
||||
}
|
||||
|
||||
# 添加后处理器统计到loop_info
|
||||
loop_post_processor_info = {
|
||||
"post_processor_time_costs": post_processor_time_costs,
|
||||
}
|
||||
|
||||
loop_info = {
|
||||
"loop_observation_info": loop_observation_info,
|
||||
"loop_processor_info": loop_processor_info,
|
||||
"loop_plan_info": loop_plan_info,
|
||||
"loop_action_info": loop_action_info,
|
||||
"loop_post_processor_info": loop_post_processor_info, # 新增
|
||||
}
|
||||
|
||||
return loop_info
|
||||
|
||||
@@ -3,16 +3,14 @@ from src.config.config import global_config
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
from src.chat.heart_flow.heartflow import heartflow
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager, ChatStream
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.utils.utils import is_mentioned_bot_in_message
|
||||
from src.chat.utils.timer_calculator import Timer
|
||||
from src.common.logger import get_logger
|
||||
|
||||
import math
|
||||
import re
|
||||
import math
|
||||
import traceback
|
||||
from typing import Optional, Tuple
|
||||
from maim_message import UserInfo
|
||||
|
||||
from src.person_info.relationship_manager import get_relationship_manager
|
||||
|
||||
@@ -90,46 +88,6 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
|
||||
return interested_rate, is_mentioned
|
||||
|
||||
|
||||
def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
|
||||
"""检查消息是否包含过滤词
|
||||
|
||||
Args:
|
||||
text: 待检查的文本
|
||||
chat: 聊天对象
|
||||
userinfo: 用户信息
|
||||
|
||||
Returns:
|
||||
bool: 是否包含过滤词
|
||||
"""
|
||||
for word in global_config.message_receive.ban_words:
|
||||
if word in text:
|
||||
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
|
||||
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
|
||||
logger.info(f"[过滤词识别]消息中含有{word},filtered")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
|
||||
"""检查消息是否匹配过滤正则表达式
|
||||
|
||||
Args:
|
||||
text: 待检查的文本
|
||||
chat: 聊天对象
|
||||
userinfo: 用户信息
|
||||
|
||||
Returns:
|
||||
bool: 是否匹配过滤正则
|
||||
"""
|
||||
for pattern in global_config.message_receive.ban_msgs_regex:
|
||||
if re.search(pattern, text):
|
||||
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
|
||||
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
|
||||
logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class HeartFCMessageReceiver:
|
||||
"""心流处理器,负责处理接收到的消息并计算兴趣度"""
|
||||
|
||||
@@ -167,12 +125,6 @@ class HeartFCMessageReceiver:
|
||||
subheartflow = await heartflow.get_or_create_subheartflow(chat.stream_id)
|
||||
message.update_chat_stream(chat)
|
||||
|
||||
# 3. 过滤检查
|
||||
if _check_ban_words(message.processed_plain_text, chat, userinfo) or _check_ban_regex(
|
||||
message.raw_message, chat, userinfo
|
||||
):
|
||||
return
|
||||
|
||||
# 6. 兴趣度计算与更新
|
||||
interested_rate, is_mentioned = await _calculate_interest(message)
|
||||
subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)
|
||||
@@ -183,7 +135,6 @@ class HeartFCMessageReceiver:
|
||||
current_talk_frequency = global_config.chat.get_current_talk_frequency(chat.stream_id)
|
||||
|
||||
# 如果消息中包含图片标识,则日志展示为图片
|
||||
import re
|
||||
|
||||
picid_match = re.search(r"\[picid:([^\]]+)\]", message.processed_plain_text)
|
||||
if picid_match:
|
||||
|
||||
@@ -42,7 +42,6 @@ class HFCPerformanceLogger:
|
||||
"total_time": cycle_data.get("total_time", 0),
|
||||
"step_times": cycle_data.get("step_times", {}),
|
||||
"processor_time_costs": cycle_data.get("processor_time_costs", {}), # 前处理器时间
|
||||
"post_processor_time_costs": cycle_data.get("post_processor_time_costs", {}), # 后处理器时间
|
||||
"reasoning": cycle_data.get("reasoning", ""),
|
||||
"success": cycle_data.get("success", False),
|
||||
}
|
||||
@@ -60,13 +59,6 @@ class HFCPerformanceLogger:
|
||||
f"time={record['total_time']:.2f}s",
|
||||
]
|
||||
|
||||
# 添加后处理器时间信息到日志
|
||||
if record["post_processor_time_costs"]:
|
||||
post_processor_stats = ", ".join(
|
||||
[f"{name}: {time_cost:.3f}s" for name, time_cost in record["post_processor_time_costs"].items()]
|
||||
)
|
||||
log_parts.append(f"post_processors=({post_processor_stats})")
|
||||
|
||||
logger.debug(f"记录HFC循环数据: {', '.join(log_parts)}")
|
||||
|
||||
except Exception as e:
|
||||
|
||||
@@ -20,7 +20,7 @@ class HFCVersionManager:
|
||||
"""HFC版本号管理器"""
|
||||
|
||||
# 默认版本号
|
||||
DEFAULT_VERSION = "v4.0.0"
|
||||
DEFAULT_VERSION = "v5.0.0"
|
||||
|
||||
# 当前运行时版本号
|
||||
_current_version: Optional[str] = None
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Dict
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExpressionSelectionInfo(InfoBase):
|
||||
"""表达选择信息类
|
||||
|
||||
用于存储和管理选中的表达方式信息。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "expression_selection"
|
||||
data (Dict[str, Any]): 包含选中表达方式的数据字典
|
||||
"""
|
||||
|
||||
type: str = "expression_selection"
|
||||
|
||||
def get_selected_expressions(self) -> List[Dict[str, str]]:
|
||||
"""获取选中的表达方式列表
|
||||
|
||||
Returns:
|
||||
List[Dict[str, str]]: 选中的表达方式列表
|
||||
"""
|
||||
return self.get_info("selected_expressions") or []
|
||||
|
||||
def set_selected_expressions(self, expressions: List[Dict[str, str]]) -> None:
|
||||
"""设置选中的表达方式列表
|
||||
|
||||
Args:
|
||||
expressions: 选中的表达方式列表
|
||||
"""
|
||||
self.data["selected_expressions"] = expressions
|
||||
|
||||
def get_expressions_count(self) -> int:
|
||||
"""获取选中表达方式的数量
|
||||
|
||||
Returns:
|
||||
int: 表达方式数量
|
||||
"""
|
||||
return len(self.get_selected_expressions())
|
||||
|
||||
def get_processed_info(self) -> str:
|
||||
"""获取处理后的信息
|
||||
|
||||
Returns:
|
||||
str: 处理后的信息字符串
|
||||
"""
|
||||
expressions = self.get_selected_expressions()
|
||||
if not expressions:
|
||||
return ""
|
||||
|
||||
# 格式化表达方式为可读文本
|
||||
formatted_expressions = []
|
||||
for expr in expressions:
|
||||
situation = expr.get("situation", "")
|
||||
style = expr.get("style", "")
|
||||
expr.get("type", "")
|
||||
|
||||
if situation and style:
|
||||
formatted_expressions.append(f"当{situation}时,使用 {style}")
|
||||
|
||||
return "\n".join(formatted_expressions)
|
||||
|
||||
def get_expressions_for_action_data(self) -> List[Dict[str, str]]:
|
||||
"""获取用于action_data的表达方式数据
|
||||
|
||||
Returns:
|
||||
List[Dict[str, str]]: 格式化后的表达方式数据
|
||||
"""
|
||||
return self.get_selected_expressions()
|
||||
@@ -1,34 +0,0 @@
|
||||
from typing import Dict, Any
|
||||
from dataclasses import dataclass, field
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class MindInfo(InfoBase):
|
||||
"""思维信息类
|
||||
|
||||
用于存储和管理当前思维状态的信息。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "mind"
|
||||
data (Dict[str, Any]): 包含 current_mind 的数据字典
|
||||
"""
|
||||
|
||||
type: str = "mind"
|
||||
data: Dict[str, Any] = field(default_factory=lambda: {"current_mind": ""})
|
||||
|
||||
def get_current_mind(self) -> str:
|
||||
"""获取当前思维状态
|
||||
|
||||
Returns:
|
||||
str: 当前思维状态
|
||||
"""
|
||||
return self.get_info("current_mind") or ""
|
||||
|
||||
def set_current_mind(self, mind: str) -> None:
|
||||
"""设置当前思维状态
|
||||
|
||||
Args:
|
||||
mind: 要设置的思维状态
|
||||
"""
|
||||
self.data["current_mind"] = mind
|
||||
@@ -1,40 +0,0 @@
|
||||
from dataclasses import dataclass
|
||||
from .info_base import InfoBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class RelationInfo(InfoBase):
|
||||
"""关系信息类
|
||||
|
||||
用于存储和管理当前关系状态的信息。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "relation"
|
||||
data (Dict[str, Any]): 包含 current_relation 的数据字典
|
||||
"""
|
||||
|
||||
type: str = "relation"
|
||||
|
||||
def get_relation_info(self) -> str:
|
||||
"""获取当前关系状态
|
||||
|
||||
Returns:
|
||||
str: 当前关系状态
|
||||
"""
|
||||
return self.get_info("relation_info") or ""
|
||||
|
||||
def set_relation_info(self, relation_info: str) -> None:
|
||||
"""设置当前关系状态
|
||||
|
||||
Args:
|
||||
relation_info: 要设置的关系状态
|
||||
"""
|
||||
self.data["relation_info"] = relation_info
|
||||
|
||||
def get_processed_info(self) -> str:
|
||||
"""获取处理后的信息
|
||||
|
||||
Returns:
|
||||
str: 处理后的信息
|
||||
"""
|
||||
return self.get_relation_info() or ""
|
||||
@@ -1,85 +0,0 @@
|
||||
from typing import Dict, Optional, Any, List
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
|
||||
class StructuredInfo:
|
||||
"""信息基类
|
||||
|
||||
这是一个基础信息类,用于存储和管理各种类型的信息数据。
|
||||
所有具体的信息类都应该继承自这个基类。
|
||||
|
||||
Attributes:
|
||||
type (str): 信息类型标识符,默认为 "base"
|
||||
data (Dict[str, Union[str, Dict, list]]): 存储具体信息数据的字典,
|
||||
支持存储字符串、字典、列表等嵌套数据结构
|
||||
"""
|
||||
|
||||
type: str = "structured_info"
|
||||
data: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def get_type(self) -> str:
|
||||
"""获取信息类型
|
||||
|
||||
Returns:
|
||||
str: 当前信息对象的类型标识符
|
||||
"""
|
||||
return self.type
|
||||
|
||||
def get_data(self) -> Dict[str, Any]:
|
||||
"""获取所有信息数据
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: 包含所有信息数据的字典
|
||||
"""
|
||||
return self.data
|
||||
|
||||
def get_info(self, key: str) -> Optional[Any]:
|
||||
"""获取特定属性的信息
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
Optional[Any]: 属性值,如果键不存在则返回 None
|
||||
"""
|
||||
return self.data.get(key)
|
||||
|
||||
def get_info_list(self, key: str) -> List[Any]:
|
||||
"""获取特定属性的信息列表
|
||||
|
||||
Args:
|
||||
key: 要获取的属性键名
|
||||
|
||||
Returns:
|
||||
List[Any]: 属性值列表,如果键不存在则返回空列表
|
||||
"""
|
||||
value = self.data.get(key)
|
||||
if isinstance(value, list):
|
||||
return value
|
||||
return []
|
||||
|
||||
def set_info(self, key: str, value: Any) -> None:
|
||||
"""设置特定属性的信息值
|
||||
|
||||
Args:
|
||||
key: 要设置的属性键名
|
||||
value: 要设置的属性值
|
||||
"""
|
||||
self.data[key] = value
|
||||
|
||||
def get_processed_info(self) -> str:
|
||||
"""获取处理后的信息
|
||||
|
||||
Returns:
|
||||
str: 处理后的信息字符串
|
||||
"""
|
||||
|
||||
info_str = ""
|
||||
# print(f"self.data: {self.data}")
|
||||
|
||||
for key, value in self.data.items():
|
||||
# print(f"key: {key}, value: {value}")
|
||||
info_str += f"信息类型:{key},信息内容:{value}\n"
|
||||
|
||||
return info_str
|
||||
@@ -1,186 +0,0 @@
|
||||
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
from src.common.logger import get_logger
|
||||
from src.individuality.individuality import get_individuality
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.tools.tool_use import ToolUser
|
||||
from src.chat.utils.json_utils import process_llm_tool_calls
|
||||
from .base_processor import BaseProcessor
|
||||
from typing import List
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.chat.focus_chat.info.structured_info import StructuredInfo
|
||||
from src.chat.heart_flow.observation.structure_observation import StructureObservation
|
||||
|
||||
logger = get_logger("processor")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
# ... 原有代码 ...
|
||||
|
||||
# 添加工具执行器提示词
|
||||
tool_executor_prompt = """
|
||||
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。
|
||||
群里正在进行的聊天内容:
|
||||
{chat_observe_info}
|
||||
|
||||
请仔细分析聊天内容,考虑以下几点:
|
||||
1. 内容中是否包含需要查询信息的问题
|
||||
2. 是否有明确的工具使用指令
|
||||
|
||||
If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed".
|
||||
"""
|
||||
Prompt(tool_executor_prompt, "tool_executor_prompt")
|
||||
|
||||
|
||||
class ToolProcessor(BaseProcessor):
|
||||
log_prefix = "工具执行器"
|
||||
|
||||
def __init__(self, subheartflow_id: str):
|
||||
super().__init__()
|
||||
self.subheartflow_id = subheartflow_id
|
||||
self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
|
||||
self.llm_model = LLMRequest(
|
||||
model=global_config.model.focus_tool_use,
|
||||
request_type="focus.processor.tool",
|
||||
)
|
||||
self.structured_info = []
|
||||
|
||||
async def process_info(
|
||||
self,
|
||||
observations: List[Observation] = None,
|
||||
action_type: str = None,
|
||||
action_data: dict = None,
|
||||
**kwargs,
|
||||
) -> List[StructuredInfo]:
|
||||
"""处理信息对象
|
||||
|
||||
Args:
|
||||
observations: 可选的观察列表,包含ChattingObservation和StructureObservation类型
|
||||
action_type: 动作类型
|
||||
action_data: 动作数据
|
||||
**kwargs: 其他可选参数
|
||||
|
||||
Returns:
|
||||
list: 处理后的结构化信息列表
|
||||
"""
|
||||
|
||||
working_infos = []
|
||||
result = []
|
||||
|
||||
if observations:
|
||||
for observation in observations:
|
||||
if isinstance(observation, ChattingObservation):
|
||||
result, used_tools, prompt = await self.execute_tools(observation)
|
||||
|
||||
logger.info(f"工具调用结果: {result}")
|
||||
# 更新WorkingObservation中的结构化信息
|
||||
for observation in observations:
|
||||
if isinstance(observation, StructureObservation):
|
||||
for structured_info in result:
|
||||
# logger.debug(f"{self.log_prefix} 更新WorkingObservation中的结构化信息: {structured_info}")
|
||||
observation.add_structured_info(structured_info)
|
||||
|
||||
working_infos = observation.get_observe_info()
|
||||
logger.debug(f"{self.log_prefix} 获取更新后WorkingObservation中的结构化信息: {working_infos}")
|
||||
|
||||
structured_info = StructuredInfo()
|
||||
if working_infos:
|
||||
for working_info in working_infos:
|
||||
structured_info.set_info(key=working_info.get("type"), value=working_info.get("content"))
|
||||
|
||||
return [structured_info]
|
||||
|
||||
async def execute_tools(self, observation: ChattingObservation, action_type: str = None, action_data: dict = None):
|
||||
"""
|
||||
并行执行工具,返回结构化信息
|
||||
|
||||
参数:
|
||||
sub_mind: 子思维对象
|
||||
chat_target_name: 聊天目标名称,默认为"对方"
|
||||
is_group_chat: 是否为群聊,默认为False
|
||||
return_details: 是否返回详细信息,默认为False
|
||||
cycle_info: 循环信息对象,可用于记录详细执行信息
|
||||
action_type: 动作类型
|
||||
action_data: 动作数据
|
||||
|
||||
返回:
|
||||
如果return_details为False:
|
||||
List[Dict]: 工具执行结果的结构化信息列表
|
||||
如果return_details为True:
|
||||
Tuple[List[Dict], List[str], str]: (工具执行结果列表, 使用的工具列表, 工具执行提示词)
|
||||
"""
|
||||
tool_instance = ToolUser()
|
||||
tools = tool_instance._define_tools()
|
||||
|
||||
# logger.debug(f"observation: {observation}")
|
||||
# logger.debug(f"observation.chat_target_info: {observation.chat_target_info}")
|
||||
# logger.debug(f"observation.is_group_chat: {observation.is_group_chat}")
|
||||
# logger.debug(f"observation.person_list: {observation.person_list}")
|
||||
|
||||
is_group_chat = observation.is_group_chat
|
||||
|
||||
# chat_observe_info = observation.get_observe_info()
|
||||
chat_observe_info = observation.talking_message_str_truncate_short
|
||||
# person_list = observation.person_list
|
||||
|
||||
# 获取时间信息
|
||||
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
|
||||
|
||||
# 构建专用于工具调用的提示词
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"tool_executor_prompt",
|
||||
chat_observe_info=chat_observe_info,
|
||||
is_group_chat=is_group_chat,
|
||||
bot_name=get_individuality().name,
|
||||
time_now=time_now,
|
||||
)
|
||||
|
||||
# 调用LLM,专注于工具使用
|
||||
# logger.info(f"开始执行工具调用{prompt}")
|
||||
response, other_info = await self.llm_model.generate_response_async(prompt=prompt, tools=tools)
|
||||
|
||||
if len(other_info) == 3:
|
||||
reasoning_content, model_name, tool_calls = other_info
|
||||
else:
|
||||
reasoning_content, model_name = other_info
|
||||
tool_calls = None
|
||||
|
||||
# print("tooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltool")
|
||||
if tool_calls:
|
||||
logger.info(f"获取到工具原始输出:\n{tool_calls}")
|
||||
# 处理工具调用和结果收集,类似于SubMind中的逻辑
|
||||
new_structured_items = []
|
||||
used_tools = [] # 记录使用了哪些工具
|
||||
|
||||
if tool_calls:
|
||||
success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
|
||||
if success and valid_tool_calls:
|
||||
for tool_call in valid_tool_calls:
|
||||
try:
|
||||
# 记录使用的工具名称
|
||||
tool_name = tool_call.get("name", "unknown_tool")
|
||||
used_tools.append(tool_name)
|
||||
|
||||
result = await tool_instance._execute_tool_call(tool_call)
|
||||
|
||||
name = result.get("type", "unknown_type")
|
||||
content = result.get("content", "")
|
||||
|
||||
logger.info(f"工具{name},获得信息:{content}")
|
||||
if result:
|
||||
new_item = {
|
||||
"type": result.get("type", "unknown_type"),
|
||||
"id": result.get("id", f"tool_exec_{time.time()}"),
|
||||
"content": result.get("content", ""),
|
||||
"ttl": 3,
|
||||
}
|
||||
new_structured_items.append(new_item)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}工具执行失败: {e}")
|
||||
|
||||
return new_structured_items, used_tools, prompt
|
||||
|
||||
|
||||
init_prompt()
|
||||
@@ -46,9 +46,12 @@ def init_prompt():
|
||||
# --- Group Chat Prompt ---
|
||||
memory_activator_prompt = """
|
||||
你是一个记忆分析器,你需要根据以下信息来进行回忆
|
||||
以下是一场聊天中的信息,请根据这些信息,总结出几个关键词作为记忆回忆的触发词
|
||||
以下是一段聊天记录,请根据这些信息,总结出几个关键词作为记忆回忆的触发词
|
||||
|
||||
聊天记录:
|
||||
{obs_info_text}
|
||||
你想要回复的消息:
|
||||
{target_message}
|
||||
|
||||
历史关键词(请避免重复提取这些关键词):
|
||||
{cached_keywords}
|
||||
@@ -69,12 +72,12 @@ class MemoryActivator:
|
||||
self.summary_model = LLMRequest(
|
||||
model=global_config.model.memory_summary,
|
||||
temperature=0.7,
|
||||
request_type="focus.memory_activator",
|
||||
request_type="memory_activator",
|
||||
)
|
||||
self.running_memory = []
|
||||
self.cached_keywords = set() # 用于缓存历史关键词
|
||||
|
||||
async def activate_memory_with_chat_history(self, chat_id, target_message, chat_history_prompt) -> List[Dict]:
|
||||
async def activate_memory_with_chat_history(self, target_message, chat_history_prompt) -> List[Dict]:
|
||||
"""
|
||||
激活记忆
|
||||
|
||||
@@ -88,23 +91,13 @@ class MemoryActivator:
|
||||
if not global_config.memory.enable_memory:
|
||||
return []
|
||||
|
||||
# obs_info_text = ""
|
||||
# for observation in observations:
|
||||
# if isinstance(observation, ChattingObservation):
|
||||
# obs_info_text += observation.talking_message_str_truncate_short
|
||||
# elif isinstance(observation, StructureObservation):
|
||||
# working_info = observation.get_observe_info()
|
||||
# for working_info_item in working_info:
|
||||
# obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n"
|
||||
|
||||
# logger.info(f"回忆待检索内容:obs_info_text: {obs_info_text}")
|
||||
|
||||
# 将缓存的关键词转换为字符串,用于prompt
|
||||
cached_keywords_str = ", ".join(self.cached_keywords) if self.cached_keywords else "暂无历史关键词"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"memory_activator_prompt",
|
||||
obs_info_text=chat_history_prompt,
|
||||
target_message=target_message,
|
||||
cached_keywords=cached_keywords_str,
|
||||
)
|
||||
|
||||
@@ -130,9 +123,6 @@ class MemoryActivator:
|
||||
related_memory = await hippocampus_manager.get_memory_from_topic(
|
||||
valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
|
||||
)
|
||||
# related_memory = await hippocampus_manager.get_memory_from_text(
|
||||
# text=obs_info_text, max_memory_num=5, max_memory_length=2, max_depth=3, fast_retrieval=False
|
||||
# )
|
||||
|
||||
logger.info(f"获取到的记忆: {related_memory}")
|
||||
|
||||
|
||||
@@ -8,14 +8,9 @@ from src.chat.utils.chat_message_builder import (
|
||||
get_person_id_list,
|
||||
)
|
||||
from src.chat.utils.prompt_builder import global_prompt_manager, Prompt
|
||||
from typing import Optional
|
||||
import difflib
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.chat.heart_flow.observation.observation import Observation
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.person_info.person_info import get_person_info_manager
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
@@ -108,75 +103,6 @@ class ChattingObservation(Observation):
|
||||
def get_observe_info(self, ids=None):
|
||||
return self.talking_message_str
|
||||
|
||||
def get_recv_message_by_text(self, sender: str, text: str) -> Optional[MessageRecv]:
|
||||
"""
|
||||
根据回复的纯文本
|
||||
1. 在talking_message中查找最新的,最匹配的消息
|
||||
2. 如果找到,则返回消息
|
||||
"""
|
||||
find_msg = None
|
||||
reverse_talking_message = list(reversed(self.talking_message))
|
||||
|
||||
for message in reverse_talking_message:
|
||||
user_id = message["user_id"]
|
||||
platform = message["platform"]
|
||||
person_id = get_person_info_manager().get_person_id(platform, user_id)
|
||||
person_name = get_person_info_manager().get_value(person_id, "person_name")
|
||||
if person_name == sender:
|
||||
similarity = difflib.SequenceMatcher(None, text, message["processed_plain_text"]).ratio()
|
||||
if similarity >= 0.9:
|
||||
find_msg = message
|
||||
break
|
||||
|
||||
if not find_msg:
|
||||
return None
|
||||
|
||||
user_info = {
|
||||
"platform": find_msg.get("user_platform", ""),
|
||||
"user_id": find_msg.get("user_id", ""),
|
||||
"user_nickname": find_msg.get("user_nickname", ""),
|
||||
"user_cardname": find_msg.get("user_cardname", ""),
|
||||
}
|
||||
|
||||
group_info = {}
|
||||
if find_msg.get("chat_info_group_id"):
|
||||
group_info = {
|
||||
"platform": find_msg.get("chat_info_group_platform", ""),
|
||||
"group_id": find_msg.get("chat_info_group_id", ""),
|
||||
"group_name": find_msg.get("chat_info_group_name", ""),
|
||||
}
|
||||
|
||||
content_format = ""
|
||||
accept_format = ""
|
||||
template_items = {}
|
||||
|
||||
format_info = {"content_format": content_format, "accept_format": accept_format}
|
||||
template_info = {
|
||||
"template_items": template_items,
|
||||
}
|
||||
|
||||
message_info = {
|
||||
"platform": self.platform,
|
||||
"message_id": find_msg.get("message_id"),
|
||||
"time": find_msg.get("time"),
|
||||
"group_info": group_info,
|
||||
"user_info": user_info,
|
||||
"additional_config": find_msg.get("additional_config"),
|
||||
"format_info": format_info,
|
||||
"template_info": template_info,
|
||||
}
|
||||
message_dict = {
|
||||
"message_info": message_info,
|
||||
"raw_message": find_msg.get("processed_plain_text"),
|
||||
"detailed_plain_text": find_msg.get("processed_plain_text"),
|
||||
"processed_plain_text": find_msg.get("processed_plain_text"),
|
||||
}
|
||||
find_rec_msg = MessageRecv(message_dict)
|
||||
|
||||
find_rec_msg.update_chat_stream(get_chat_manager().get_or_create_stream(self.chat_id))
|
||||
|
||||
return find_rec_msg
|
||||
|
||||
async def observe(self):
|
||||
# 自上一次观察的新消息
|
||||
new_messages_list = get_raw_msg_by_timestamp_with_chat(
|
||||
|
||||
@@ -1,42 +0,0 @@
|
||||
from datetime import datetime
|
||||
from src.common.logger import get_logger
|
||||
|
||||
# Import the new utility function
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
|
||||
# 所有观察的基类
|
||||
class StructureObservation:
|
||||
def __init__(self, observe_id):
|
||||
self.observe_info = ""
|
||||
self.observe_id = observe_id
|
||||
self.last_observe_time = datetime.now().timestamp() # 初始化为当前时间
|
||||
self.history_loop = []
|
||||
self.structured_info = []
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""将观察对象转换为可序列化的字典"""
|
||||
return {
|
||||
"observe_info": self.observe_info,
|
||||
"observe_id": self.observe_id,
|
||||
"last_observe_time": self.last_observe_time,
|
||||
"history_loop": self.history_loop,
|
||||
"structured_info": self.structured_info,
|
||||
}
|
||||
|
||||
def get_observe_info(self):
|
||||
return self.structured_info
|
||||
|
||||
def add_structured_info(self, structured_info: dict):
|
||||
self.structured_info.append(structured_info)
|
||||
|
||||
async def observe(self):
|
||||
observed_structured_infos = []
|
||||
for structured_info in self.structured_info:
|
||||
if structured_info.get("ttl") > 0:
|
||||
structured_info["ttl"] -= 1
|
||||
observed_structured_infos.append(structured_info)
|
||||
logger.debug(f"观察到结构化信息仍旧在: {structured_info}")
|
||||
|
||||
self.structured_info = observed_structured_infos
|
||||
@@ -62,7 +62,10 @@ class SubHeartflow:
|
||||
"""异步初始化方法,创建兴趣流并确定聊天类型"""
|
||||
|
||||
# 根据配置决定初始状态
|
||||
if global_config.chat.chat_mode == "focus":
|
||||
if not self.is_group_chat:
|
||||
logger.debug(f"{self.log_prefix} 检测到是私聊,将直接尝试进入 FOCUSED 状态。")
|
||||
await self.change_chat_state(ChatState.FOCUSED)
|
||||
elif global_config.chat.chat_mode == "focus":
|
||||
logger.debug(f"{self.log_prefix} 配置为 focus 模式,将直接尝试进入 FOCUSED 状态。")
|
||||
await self.change_chat_state(ChatState.FOCUSED)
|
||||
else: # "auto" 或其他模式保持原有逻辑或默认为 NORMAL
|
||||
|
||||
@@ -91,16 +91,10 @@ class SubHeartflowManager:
|
||||
return subflow
|
||||
|
||||
try:
|
||||
# 初始化子心流, 传入 mai_state_info
|
||||
new_subflow = SubHeartflow(
|
||||
subheartflow_id,
|
||||
)
|
||||
|
||||
# 首先创建并添加聊天观察者
|
||||
# observation = ChattingObservation(chat_id=subheartflow_id)
|
||||
# await observation.initialize()
|
||||
# new_subflow.add_observation(observation)
|
||||
|
||||
# 然后再进行异步初始化,此时 SubHeartflow 内部若需启动 HeartFChatting,就能拿到 observation
|
||||
await new_subflow.initialize()
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import traceback
|
||||
import os
|
||||
from typing import Dict, Any
|
||||
|
||||
from src.common.logger import get_logger
|
||||
@@ -13,13 +14,65 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.config.config import global_config
|
||||
from src.plugin_system.core.component_registry import component_registry # 导入新插件系统
|
||||
from src.plugin_system.base.base_command import BaseCommand
|
||||
from src.mais4u.mais4u_chat.s4u_msg_processor import S4UMessageProcessor
|
||||
from maim_message import UserInfo
|
||||
from src.chat.message_receive.chat_stream import ChatStream
|
||||
import re
|
||||
# 定义日志配置
|
||||
|
||||
# 获取项目根目录(假设本文件在src/chat/message_receive/下,根目录为上上上级目录)
|
||||
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
|
||||
ENABLE_S4U_CHAT = os.path.isfile(os.path.join(PROJECT_ROOT, "s4u.s4u"))
|
||||
|
||||
if ENABLE_S4U_CHAT:
|
||||
print("""\nS4U私聊模式已开启\n!!!!!!!!!!!!!!!!!\n""")
|
||||
# 仅内部开启
|
||||
|
||||
# 配置主程序日志格式
|
||||
logger = get_logger("chat")
|
||||
|
||||
|
||||
def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
|
||||
"""检查消息是否包含过滤词
|
||||
|
||||
Args:
|
||||
text: 待检查的文本
|
||||
chat: 聊天对象
|
||||
userinfo: 用户信息
|
||||
|
||||
Returns:
|
||||
bool: 是否包含过滤词
|
||||
"""
|
||||
for word in global_config.message_receive.ban_words:
|
||||
if word in text:
|
||||
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
|
||||
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
|
||||
logger.info(f"[过滤词识别]消息中含有{word},filtered")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
|
||||
"""检查消息是否匹配过滤正则表达式
|
||||
|
||||
Args:
|
||||
text: 待检查的文本
|
||||
chat: 聊天对象
|
||||
userinfo: 用户信息
|
||||
|
||||
Returns:
|
||||
bool: 是否匹配过滤正则
|
||||
"""
|
||||
for pattern in global_config.message_receive.ban_msgs_regex:
|
||||
if re.search(pattern, text):
|
||||
chat_name = chat.group_info.group_name if chat.group_info else "私聊"
|
||||
logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
|
||||
logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class ChatBot:
|
||||
def __init__(self):
|
||||
self.bot = None # bot 实例引用
|
||||
@@ -30,6 +83,7 @@ class ChatBot:
|
||||
# 创建初始化PFC管理器的任务,会在_ensure_started时执行
|
||||
self.only_process_chat = MessageProcessor()
|
||||
self.pfc_manager = PFCManager.get_instance()
|
||||
self.s4u_message_processor = S4UMessageProcessor()
|
||||
|
||||
async def _ensure_started(self):
|
||||
"""确保所有任务已启动"""
|
||||
@@ -38,17 +92,6 @@ class ChatBot:
|
||||
|
||||
self._started = True
|
||||
|
||||
async def _create_pfc_chat(self, message: MessageRecv):
|
||||
try:
|
||||
if global_config.experimental.pfc_chatting:
|
||||
chat_id = str(message.chat_stream.stream_id)
|
||||
private_name = str(message.message_info.user_info.user_nickname)
|
||||
|
||||
await self.pfc_manager.get_or_create_conversation(chat_id, private_name)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"创建PFC聊天失败: {e}")
|
||||
|
||||
async def _process_commands_with_new_system(self, message: MessageRecv):
|
||||
# sourcery skip: use-named-expression
|
||||
"""使用新插件系统处理命令"""
|
||||
@@ -139,14 +182,20 @@ class ChatBot:
|
||||
|
||||
get_chat_manager().register_message(message)
|
||||
|
||||
# 创建聊天流
|
||||
chat = await get_chat_manager().get_or_create_stream(
|
||||
platform=message.message_info.platform,
|
||||
user_info=user_info,
|
||||
group_info=group_info,
|
||||
)
|
||||
|
||||
message.update_chat_stream(chat)
|
||||
|
||||
# 过滤检查
|
||||
if _check_ban_words(message.processed_plain_text, chat, user_info) or _check_ban_regex(
|
||||
message.raw_message, chat, user_info
|
||||
):
|
||||
return
|
||||
|
||||
# 处理消息内容,生成纯文本
|
||||
await message.process()
|
||||
|
||||
@@ -172,24 +221,12 @@ class ChatBot:
|
||||
template_group_name = None
|
||||
|
||||
async def preprocess():
|
||||
logger.debug("开始预处理消息...")
|
||||
# 如果在私聊中
|
||||
if group_info is None:
|
||||
logger.debug("检测到私聊消息")
|
||||
if global_config.experimental.pfc_chatting:
|
||||
logger.debug("进入PFC私聊处理流程")
|
||||
# 创建聊天流
|
||||
logger.debug(f"为{user_info.user_id}创建/获取聊天流")
|
||||
await self.only_process_chat.process_message(message)
|
||||
await self._create_pfc_chat(message)
|
||||
# 禁止PFC,进入普通的心流消息处理逻辑
|
||||
else:
|
||||
logger.debug("进入普通心流私聊处理")
|
||||
await self.heartflow_message_receiver.process_message(message)
|
||||
# 群聊默认进入心流消息处理逻辑
|
||||
else:
|
||||
logger.debug(f"检测到群聊消息,群ID: {group_info.group_id}")
|
||||
await self.heartflow_message_receiver.process_message(message)
|
||||
if ENABLE_S4U_CHAT:
|
||||
logger.info("进入S4U流程")
|
||||
await self.s4u_message_processor.process_message(message)
|
||||
return
|
||||
|
||||
await self.heartflow_message_receiver.process_message(message)
|
||||
|
||||
if template_group_name:
|
||||
async with global_prompt_manager.async_message_scope(template_group_name):
|
||||
|
||||
@@ -301,6 +301,7 @@ class MessageSending(MessageProcessBase):
|
||||
is_emoji: bool = False,
|
||||
thinking_start_time: float = 0,
|
||||
apply_set_reply_logic: bool = False,
|
||||
reply_to: str = None,
|
||||
):
|
||||
# 调用父类初始化
|
||||
super().__init__(
|
||||
@@ -319,6 +320,8 @@ class MessageSending(MessageProcessBase):
|
||||
self.is_emoji = is_emoji
|
||||
self.apply_set_reply_logic = apply_set_reply_logic
|
||||
|
||||
self.reply_to = reply_to
|
||||
|
||||
# 用于显示发送内容与显示不一致的情况
|
||||
self.display_message = display_message
|
||||
|
||||
|
||||
@@ -35,9 +35,13 @@ class MessageStorage:
|
||||
filtered_display_message = re.sub(pattern, "", display_message, flags=re.DOTALL)
|
||||
else:
|
||||
filtered_display_message = ""
|
||||
|
||||
reply_to = message.reply_to
|
||||
else:
|
||||
filtered_display_message = ""
|
||||
|
||||
reply_to = ""
|
||||
|
||||
chat_info_dict = chat_stream.to_dict()
|
||||
user_info_dict = message.message_info.user_info.to_dict()
|
||||
|
||||
@@ -54,6 +58,7 @@ class MessageStorage:
|
||||
time=float(message.message_info.time),
|
||||
chat_id=chat_stream.stream_id,
|
||||
# Flattened chat_info
|
||||
reply_to=reply_to,
|
||||
chat_info_stream_id=chat_info_dict.get("stream_id"),
|
||||
chat_info_platform=chat_info_dict.get("platform"),
|
||||
chat_info_user_platform=user_info_from_chat.get("platform"),
|
||||
|
||||
@@ -29,7 +29,6 @@ import traceback
|
||||
|
||||
from .normal_chat_generator import NormalChatGenerator
|
||||
from src.chat.normal_chat.normal_chat_expressor import NormalChatExpressor
|
||||
from src.chat.replyer.default_generator import DefaultReplyer
|
||||
from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
|
||||
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
|
||||
|
||||
@@ -69,7 +68,6 @@ class NormalChat:
|
||||
|
||||
# 初始化Normal Chat专用表达器
|
||||
self.expressor = NormalChatExpressor(self.chat_stream)
|
||||
self.replyer = DefaultReplyer(self.chat_stream)
|
||||
|
||||
# Interest dict
|
||||
self.interest_dict = interest_dict
|
||||
|
||||
@@ -16,7 +16,7 @@ class NormalChatGenerator:
|
||||
model_config_1 = global_config.model.replyer_1.copy()
|
||||
model_config_2 = global_config.model.replyer_2.copy()
|
||||
|
||||
prob_first = global_config.normal_chat.normal_chat_first_probability
|
||||
prob_first = global_config.chat.replyer_random_probability
|
||||
|
||||
model_config_1["weight"] = prob_first
|
||||
model_config_2["weight"] = 1.0 - prob_first
|
||||
@@ -42,15 +42,13 @@ class NormalChatGenerator:
|
||||
relation_info = await person_info_manager.get_value(person_id, "short_impression")
|
||||
reply_to_str = f"{person_name}:{message.processed_plain_text}"
|
||||
|
||||
structured_info = ""
|
||||
|
||||
try:
|
||||
success, reply_set, prompt = await generator_api.generate_reply(
|
||||
chat_stream=message.chat_stream,
|
||||
reply_to=reply_to_str,
|
||||
relation_info=relation_info,
|
||||
structured_info=structured_info,
|
||||
available_actions=available_actions,
|
||||
enable_tool=global_config.tool.enable_in_normal_chat,
|
||||
model_configs=self.model_configs,
|
||||
request_type="normal.replyer",
|
||||
return_prompt=True,
|
||||
|
||||
@@ -15,7 +15,6 @@ from src.chat.message_receive.chat_stream import ChatStream
|
||||
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
|
||||
from src.chat.express.exprssion_learner import get_expression_learner
|
||||
import time
|
||||
import asyncio
|
||||
from src.chat.express.expression_selector import expression_selector
|
||||
@@ -28,6 +27,7 @@ from datetime import datetime
|
||||
import re
|
||||
from src.chat.knowledge.knowledge_lib import qa_manager
|
||||
from src.chat.focus_chat.memory_activator import MemoryActivator
|
||||
from src.tools.tool_executor import ToolExecutor
|
||||
|
||||
logger = get_logger("replyer")
|
||||
|
||||
@@ -36,13 +36,14 @@ def init_prompt():
|
||||
Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
|
||||
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
|
||||
Prompt("在群里聊天", "chat_target_group2")
|
||||
Prompt("和{sender_name}私聊", "chat_target_private2")
|
||||
Prompt("和{sender_name}聊天", "chat_target_private2")
|
||||
Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
{expression_habits_block}
|
||||
{structured_info_block}
|
||||
{tool_info_block}
|
||||
{knowledge_prompt}
|
||||
{memory_block}
|
||||
{relation_info_block}
|
||||
{extra_info_block}
|
||||
@@ -67,88 +68,52 @@ def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
{expression_habits_block}
|
||||
{structured_info_block}
|
||||
{memory_block}
|
||||
{relation_info_block}
|
||||
{extra_info_block}
|
||||
|
||||
{chat_target}
|
||||
{time_block}
|
||||
{chat_target}
|
||||
{chat_info}
|
||||
现在"{sender_name}"说:{target_message}。你想要回复对方的这条消息。
|
||||
{identity},
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。注意不要复读你说过的话。
|
||||
{identity}
|
||||
|
||||
{config_expression_style}。回复不要浮夸,不要用夸张修辞,平淡一些。
|
||||
{keywords_reaction_prompt}
|
||||
请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。
|
||||
不要浮夸,不要夸张修辞,请注意不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_generator_private_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
你可以参考你的以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
|
||||
{style_habbits}
|
||||
|
||||
你现在正在群里聊天,以下是群里正在进行的聊天内容:
|
||||
{chat_info}
|
||||
|
||||
以上是聊天内容,你需要了解聊天记录中的内容
|
||||
|
||||
{chat_target}
|
||||
你的名字是{bot_name},{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
|
||||
你正在{chat_target_2},{reply_target_block}
|
||||
对这句话,你想表达,原句:{raw_reply},原因是:{reason}。你现在要思考怎么组织回复
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。请你修改你想表达的原句,符合你的表达风格和语言习惯
|
||||
请你根据情景使用以下句法:
|
||||
{grammar_habbits}
|
||||
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
{keywords_reaction_prompt}
|
||||
{moderation_prompt}
|
||||
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_expressor_prompt",
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""
|
||||
你可以参考以下的语言习惯,如果情景合适就使用,不要盲目使用,不要生硬使用,而是结合到表达中:
|
||||
{style_habbits}
|
||||
|
||||
你现在正在群里聊天,以下是群里正在进行的聊天内容:
|
||||
{chat_info}
|
||||
|
||||
以上是聊天内容,你需要了解聊天记录中的内容
|
||||
|
||||
{chat_target}
|
||||
你的名字是{bot_name},{prompt_personality},在这聊天中,"{sender_name}"说的"{target_message}"引起了你的注意,对这句话,你想表达:{raw_reply},原因是:{reason}。你现在要思考怎么回复
|
||||
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
|
||||
请你根据情景使用以下句法:
|
||||
{grammar_habbits}
|
||||
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
|
||||
不要浮夸,不要夸张修辞,平淡且不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 ),只输出一条回复就好。
|
||||
现在,你说:
|
||||
""",
|
||||
"default_expressor_private_prompt", # New template for private FOCUSED chat
|
||||
)
|
||||
|
||||
|
||||
class DefaultReplyer:
|
||||
def __init__(
|
||||
self,
|
||||
chat_stream: ChatStream,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "focus.replyer",
|
||||
):
|
||||
self.log_prefix = "replyer"
|
||||
self.request_type = request_type
|
||||
|
||||
self.enable_tool = enable_tool
|
||||
|
||||
if model_configs:
|
||||
self.express_model_configs = model_configs
|
||||
else:
|
||||
# 当未提供配置时,使用默认配置并赋予默认权重
|
||||
default_config = global_config.model.replyer_1.copy()
|
||||
default_config.setdefault("weight", 1.0)
|
||||
self.express_model_configs = [default_config]
|
||||
|
||||
model_config_1 = global_config.model.replyer_1.copy()
|
||||
model_config_2 = global_config.model.replyer_2.copy()
|
||||
prob_first = global_config.chat.replyer_random_probability
|
||||
|
||||
model_config_1["weight"] = prob_first
|
||||
model_config_2["weight"] = 1.0 - prob_first
|
||||
|
||||
self.express_model_configs = [model_config_1, model_config_2]
|
||||
|
||||
if not self.express_model_configs:
|
||||
logger.warning("未找到有效的模型配置,回复生成可能会失败。")
|
||||
@@ -157,12 +122,13 @@ class DefaultReplyer:
|
||||
fallback_config.setdefault("weight", 1.0)
|
||||
self.express_model_configs = [fallback_config]
|
||||
|
||||
self.heart_fc_sender = HeartFCSender()
|
||||
self.memory_activator = MemoryActivator()
|
||||
|
||||
self.chat_stream = chat_stream
|
||||
self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
|
||||
|
||||
self.heart_fc_sender = HeartFCSender()
|
||||
self.memory_activator = MemoryActivator()
|
||||
self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)
|
||||
|
||||
def _select_weighted_model_config(self) -> Dict[str, Any]:
|
||||
"""使用加权随机选择来挑选一个模型配置"""
|
||||
configs = self.express_model_configs
|
||||
@@ -205,7 +171,6 @@ class DefaultReplyer:
|
||||
reply_data: Dict[str, Any] = None,
|
||||
reply_to: str = "",
|
||||
relation_info: str = "",
|
||||
structured_info: str = "",
|
||||
extra_info: str = "",
|
||||
available_actions: List[str] = None,
|
||||
) -> Tuple[bool, Optional[str]]:
|
||||
@@ -222,7 +187,6 @@ class DefaultReplyer:
|
||||
reply_data = {
|
||||
"reply_to": reply_to,
|
||||
"relation_info": relation_info,
|
||||
"structured_info": structured_info,
|
||||
"extra_info": extra_info,
|
||||
}
|
||||
for key, value in reply_data.items():
|
||||
@@ -246,7 +210,7 @@ class DefaultReplyer:
|
||||
# 加权随机选择一个模型配置
|
||||
selected_model_config = self._select_weighted_model_config()
|
||||
logger.info(
|
||||
f"{self.log_prefix} 使用模型配置: {selected_model_config.get('model_name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
|
||||
f"{self.log_prefix} 使用模型配置: {selected_model_config.get('name', 'N/A')} (权重: {selected_model_config.get('weight', 1.0)})"
|
||||
)
|
||||
|
||||
express_model = LLMRequest(
|
||||
@@ -271,20 +235,29 @@ class DefaultReplyer:
|
||||
traceback.print_exc()
|
||||
return False, None
|
||||
|
||||
async def rewrite_reply_with_context(self, reply_data: Dict[str, Any]) -> Tuple[bool, Optional[str]]:
|
||||
async def rewrite_reply_with_context(
|
||||
self,
|
||||
reply_data: Dict[str, Any],
|
||||
raw_reply: str = "",
|
||||
reason: str = "",
|
||||
reply_to: str = "",
|
||||
relation_info: str = "",
|
||||
) -> Tuple[bool, Optional[str]]:
|
||||
"""
|
||||
表达器 (Expressor): 核心逻辑,负责生成回复文本。
|
||||
"""
|
||||
try:
|
||||
reply_to = reply_data.get("reply_to", "")
|
||||
raw_reply = reply_data.get("raw_reply", "")
|
||||
reason = reply_data.get("reason", "")
|
||||
if not reply_data:
|
||||
reply_data = {
|
||||
"reply_to": reply_to,
|
||||
"relation_info": relation_info,
|
||||
}
|
||||
|
||||
with Timer("构建Prompt", {}): # 内部计时器,可选保留
|
||||
prompt = await self.build_prompt_rewrite_context(
|
||||
raw_reply=raw_reply,
|
||||
reason=reason,
|
||||
reply_to=reply_to,
|
||||
reply_data=reply_data,
|
||||
)
|
||||
|
||||
content = None
|
||||
@@ -309,8 +282,7 @@ class DefaultReplyer:
|
||||
|
||||
content, (reasoning_content, model_name) = await express_model.generate_response_async(prompt)
|
||||
|
||||
logger.info(f"想要表达:{raw_reply}||理由:{reason}")
|
||||
logger.info(f"最终回复: {content}\n")
|
||||
logger.info(f"想要表达:{raw_reply}||理由:{reason}||生成回复: {content}\n")
|
||||
|
||||
except Exception as llm_e:
|
||||
# 精简报错信息
|
||||
@@ -325,6 +297,9 @@ class DefaultReplyer:
|
||||
return False, None
|
||||
|
||||
async def build_relation_info(self, reply_data=None, chat_history=None):
|
||||
if not global_config.relationship.enable_relationship:
|
||||
return ""
|
||||
|
||||
relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
|
||||
if not reply_data:
|
||||
return ""
|
||||
@@ -344,6 +319,9 @@ class DefaultReplyer:
|
||||
return relation_info
|
||||
|
||||
async def build_expression_habits(self, chat_history, target):
|
||||
if not global_config.expression.enable_expression:
|
||||
return ""
|
||||
|
||||
style_habbits = []
|
||||
grammar_habbits = []
|
||||
|
||||
@@ -379,8 +357,11 @@ class DefaultReplyer:
|
||||
return expression_habits_block
|
||||
|
||||
async def build_memory_block(self, chat_history, target):
|
||||
if not global_config.memory.enable_memory:
|
||||
return ""
|
||||
|
||||
running_memorys = await self.memory_activator.activate_memory_with_chat_history(
|
||||
chat_id=self.chat_stream.stream_id, target_message=target, chat_history_prompt=chat_history
|
||||
target_message=target, chat_history_prompt=chat_history
|
||||
)
|
||||
|
||||
if running_memorys:
|
||||
@@ -394,6 +375,52 @@ class DefaultReplyer:
|
||||
|
||||
return memory_block
|
||||
|
||||
async def build_tool_info(self, reply_data=None, chat_history=None):
|
||||
"""构建工具信息块
|
||||
|
||||
Args:
|
||||
reply_data: 回复数据,包含要回复的消息内容
|
||||
chat_history: 聊天历史
|
||||
|
||||
Returns:
|
||||
str: 工具信息字符串
|
||||
"""
|
||||
|
||||
if not reply_data:
|
||||
return ""
|
||||
|
||||
reply_to = reply_data.get("reply_to", "")
|
||||
sender, text = self._parse_reply_target(reply_to)
|
||||
|
||||
if not text:
|
||||
return ""
|
||||
|
||||
try:
|
||||
# 使用工具执行器获取信息
|
||||
tool_results = await self.tool_executor.execute_from_chat_message(
|
||||
sender=sender, target_message=text, chat_history=chat_history, return_details=False
|
||||
)
|
||||
|
||||
if tool_results:
|
||||
tool_info_str = "以下是你通过工具获取到的实时信息:\n"
|
||||
for tool_result in tool_results:
|
||||
tool_name = tool_result.get("tool_name", "unknown")
|
||||
content = tool_result.get("content", "")
|
||||
result_type = tool_result.get("type", "info")
|
||||
|
||||
tool_info_str += f"- 【{tool_name}】{result_type}: {content}\n"
|
||||
|
||||
tool_info_str += "以上是你获取到的实时信息,请在回复时参考这些信息。"
|
||||
logger.info(f"{self.log_prefix} 获取到 {len(tool_results)} 个工具结果")
|
||||
return tool_info_str
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix} 未获取到任何工具结果")
|
||||
return ""
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 工具信息获取失败: {e}")
|
||||
return ""
|
||||
|
||||
def _parse_reply_target(self, target_message: str) -> tuple:
|
||||
sender = ""
|
||||
target = ""
|
||||
@@ -457,8 +484,6 @@ class DefaultReplyer:
|
||||
person_info_manager = get_person_info_manager()
|
||||
bot_person_id = person_info_manager.get_person_id("system", "bot_id")
|
||||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
structured_info = reply_data.get("structured_info", "")
|
||||
reply_to = reply_data.get("reply_to", "none")
|
||||
extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")
|
||||
|
||||
@@ -502,21 +527,22 @@ class DefaultReplyer:
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
# 并行执行三个构建任务
|
||||
expression_habits_block, relation_info, memory_block = await asyncio.gather(
|
||||
# 并行执行四个构建任务
|
||||
expression_habits_block, relation_info, memory_block, tool_info = await asyncio.gather(
|
||||
self.build_expression_habits(chat_talking_prompt_half, target),
|
||||
self.build_relation_info(reply_data, chat_talking_prompt_half),
|
||||
self.build_memory_block(chat_talking_prompt_half, target),
|
||||
self.build_tool_info(reply_data, chat_talking_prompt_half),
|
||||
)
|
||||
|
||||
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
|
||||
|
||||
if structured_info:
|
||||
structured_info_block = (
|
||||
f"以下是你了解的额外信息信息,现在请你阅读以下内容,进行决策\n{structured_info}\n以上是一些额外的信息。"
|
||||
if tool_info:
|
||||
tool_info_block = (
|
||||
f"以下是你了解的额外信息信息,现在请你阅读以下内容,进行决策\n{tool_info}\n以上是一些额外的信息。"
|
||||
)
|
||||
else:
|
||||
structured_info_block = ""
|
||||
tool_info_block = ""
|
||||
|
||||
if extra_info_block:
|
||||
extra_info_block = f"以下是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策\n{extra_info_block}\n以上是你在回复时需要参考的信息,现在请你阅读以下内容,进行决策"
|
||||
@@ -555,20 +581,25 @@ class DefaultReplyer:
|
||||
"请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。"
|
||||
)
|
||||
|
||||
if is_group_chat:
|
||||
if sender:
|
||||
reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要在群里发言或者回复消息。"
|
||||
else: # private chat
|
||||
if sender:
|
||||
reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。"
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要回复。"
|
||||
if sender and target:
|
||||
if is_group_chat:
|
||||
if sender:
|
||||
reply_target_block = (
|
||||
f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
)
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要在群里发言或者回复消息。"
|
||||
else: # private chat
|
||||
if sender:
|
||||
reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。"
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要回复。"
|
||||
else:
|
||||
reply_target_block = ""
|
||||
|
||||
mood_prompt = mood_manager.get_mood_prompt()
|
||||
|
||||
@@ -576,173 +607,171 @@ class DefaultReplyer:
|
||||
if prompt_info:
|
||||
prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)
|
||||
|
||||
# --- Choose template based on chat type ---
|
||||
template_name = "default_generator_prompt"
|
||||
if is_group_chat:
|
||||
template_name = "default_generator_prompt"
|
||||
# Group specific formatting variables (already fetched or default)
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
|
||||
chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
expression_habits_block=expression_habits_block,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
memory_block=memory_block,
|
||||
structured_info_block=structured_info_block,
|
||||
extra_info_block=extra_info_block,
|
||||
relation_info_block=relation_info,
|
||||
time_block=time_block,
|
||||
reply_target_block=reply_target_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
keywords_reaction_prompt=keywords_reaction_prompt,
|
||||
identity=indentify_block,
|
||||
target_message=target,
|
||||
sender_name=sender,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
action_descriptions=action_descriptions,
|
||||
chat_target_2=chat_target_2,
|
||||
mood_prompt=mood_prompt,
|
||||
)
|
||||
else: # Private chat
|
||||
template_name = "default_generator_private_prompt"
|
||||
# 在私聊时获取对方的昵称信息
|
||||
else:
|
||||
chat_target_name = "对方"
|
||||
if self.chat_target_info:
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
|
||||
)
|
||||
chat_target_1 = f"你正在和 {chat_target_name} 聊天"
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
expression_habits_block=expression_habits_block,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
memory_block=memory_block,
|
||||
structured_info_block=structured_info_block,
|
||||
relation_info_block=relation_info,
|
||||
extra_info_block=extra_info_block,
|
||||
time_block=time_block,
|
||||
keywords_reaction_prompt=keywords_reaction_prompt,
|
||||
identity=indentify_block,
|
||||
target_message=target,
|
||||
sender_name=sender,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
chat_target_1 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private1", sender_name=chat_target_name
|
||||
)
|
||||
chat_target_2 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private2", sender_name=chat_target_name
|
||||
)
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
expression_habits_block=expression_habits_block,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
memory_block=memory_block,
|
||||
tool_info_block=tool_info_block,
|
||||
knowledge_prompt=prompt_info,
|
||||
extra_info_block=extra_info_block,
|
||||
relation_info_block=relation_info,
|
||||
time_block=time_block,
|
||||
reply_target_block=reply_target_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
keywords_reaction_prompt=keywords_reaction_prompt,
|
||||
identity=indentify_block,
|
||||
target_message=target,
|
||||
sender_name=sender,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
action_descriptions=action_descriptions,
|
||||
chat_target_2=chat_target_2,
|
||||
mood_prompt=mood_prompt,
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
async def build_prompt_rewrite_context(
|
||||
self,
|
||||
reason,
|
||||
raw_reply,
|
||||
reply_to,
|
||||
reply_data: Dict[str, Any],
|
||||
raw_reply: str = "",
|
||||
reason: str = "",
|
||||
) -> str:
|
||||
sender = ""
|
||||
target = ""
|
||||
if ":" in reply_to or ":" in reply_to:
|
||||
# 使用正则表达式匹配中文或英文冒号
|
||||
parts = re.split(pattern=r"[::]", string=reply_to, maxsplit=1)
|
||||
if len(parts) == 2:
|
||||
sender = parts[0].strip()
|
||||
target = parts[1].strip()
|
||||
|
||||
chat_stream = self.chat_stream
|
||||
|
||||
chat_id = chat_stream.stream_id
|
||||
person_info_manager = get_person_info_manager()
|
||||
bot_person_id = person_info_manager.get_person_id("system", "bot_id")
|
||||
is_group_chat = bool(chat_stream.group_info)
|
||||
|
||||
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_stream.stream_id,
|
||||
reply_to = reply_data.get("reply_to", "none")
|
||||
sender, target = self._parse_reply_target(reply_to)
|
||||
|
||||
message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
|
||||
chat_id=chat_id,
|
||||
timestamp=time.time(),
|
||||
limit=global_config.chat.max_context_size,
|
||||
limit=int(global_config.chat.max_context_size * 0.5),
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
chat_talking_prompt_half = build_readable_messages(
|
||||
message_list_before_now_half,
|
||||
replace_bot_name=True,
|
||||
merge_messages=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="relative",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
expression_learner = get_expression_learner()
|
||||
(
|
||||
learnt_style_expressions,
|
||||
learnt_grammar_expressions,
|
||||
personality_expressions,
|
||||
) = expression_learner.get_expression_by_chat_id(chat_stream.stream_id)
|
||||
# 并行执行2个构建任务
|
||||
expression_habits_block, relation_info = await asyncio.gather(
|
||||
self.build_expression_habits(chat_talking_prompt_half, target),
|
||||
self.build_relation_info(reply_data, chat_talking_prompt_half),
|
||||
)
|
||||
|
||||
style_habbits = []
|
||||
grammar_habbits = []
|
||||
# 1. learnt_expressions加权随机选3条
|
||||
if learnt_style_expressions:
|
||||
weights = [expr["count"] for expr in learnt_style_expressions]
|
||||
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
|
||||
for expr in selected_learnt:
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
# 2. learnt_grammar_expressions加权随机选3条
|
||||
if learnt_grammar_expressions:
|
||||
weights = [expr["count"] for expr in learnt_grammar_expressions]
|
||||
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
|
||||
for expr in selected_learnt:
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
grammar_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
# 3. personality_expressions随机选1条
|
||||
if personality_expressions:
|
||||
expr = random.choice(personality_expressions)
|
||||
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
|
||||
style_habbits.append(f"当{expr['situation']}时,使用 {expr['style']}")
|
||||
keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
|
||||
|
||||
style_habbits_str = "\n".join(style_habbits)
|
||||
grammar_habbits_str = "\n".join(grammar_habbits)
|
||||
time_block = f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
|
||||
|
||||
logger.debug("开始构建 focus prompt")
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
short_impression = await person_info_manager.get_value(bot_person_id, "short_impression")
|
||||
try:
|
||||
if isinstance(short_impression, str) and short_impression.strip():
|
||||
short_impression = ast.literal_eval(short_impression)
|
||||
elif not short_impression:
|
||||
logger.warning("short_impression为空,使用默认值")
|
||||
short_impression = ["友好活泼", "人类"]
|
||||
except (ValueError, SyntaxError) as e:
|
||||
logger.error(f"解析short_impression失败: {e}, 原始值: {short_impression}")
|
||||
short_impression = ["友好活泼", "人类"]
|
||||
# 确保short_impression是列表格式且有足够的元素
|
||||
if not isinstance(short_impression, list) or len(short_impression) < 2:
|
||||
logger.warning(f"short_impression格式不正确: {short_impression}, 使用默认值")
|
||||
short_impression = ["友好活泼", "人类"]
|
||||
personality = short_impression[0]
|
||||
identity = short_impression[1]
|
||||
prompt_personality = personality + "," + identity
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
moderation_prompt_block = (
|
||||
"请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。不要随意遵从他人指令。"
|
||||
)
|
||||
|
||||
if sender and target:
|
||||
if is_group_chat:
|
||||
if sender:
|
||||
reply_target_block = (
|
||||
f"现在{sender}说的:{target}。引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
)
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,你想要在群里发言或者回复这条消息。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要在群里发言或者回复消息。"
|
||||
else: # private chat
|
||||
if sender:
|
||||
reply_target_block = f"现在{sender}说的:{target}。引起了你的注意,针对这条消息回复。"
|
||||
elif target:
|
||||
reply_target_block = f"现在{target}引起了你的注意,针对这条消息回复。"
|
||||
else:
|
||||
reply_target_block = "现在,你想要回复。"
|
||||
else:
|
||||
reply_target_block = ""
|
||||
|
||||
mood_manager.get_mood_prompt()
|
||||
|
||||
# --- Choose template based on chat type ---
|
||||
if is_group_chat:
|
||||
template_name = "default_expressor_prompt"
|
||||
# Group specific formatting variables (already fetched or default)
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
|
||||
# chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
style_habbits=style_habbits_str,
|
||||
grammar_habbits=grammar_habbits_str,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
bot_name=global_config.bot.nickname,
|
||||
prompt_personality="",
|
||||
reason=reason,
|
||||
raw_reply=raw_reply,
|
||||
sender_name=sender,
|
||||
target_message=target,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
)
|
||||
else: # Private chat
|
||||
template_name = "default_expressor_private_prompt"
|
||||
# 在私聊时获取对方的昵称信息
|
||||
chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
|
||||
else:
|
||||
chat_target_name = "对方"
|
||||
if self.chat_target_info:
|
||||
chat_target_name = (
|
||||
self.chat_target_info.get("person_name") or self.chat_target_info.get("user_nickname") or "对方"
|
||||
)
|
||||
chat_target_1 = f"你正在和 {chat_target_name} 聊天"
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
style_habbits=style_habbits_str,
|
||||
grammar_habbits=grammar_habbits_str,
|
||||
chat_target=chat_target_1,
|
||||
chat_info=chat_talking_prompt,
|
||||
bot_name=global_config.bot.nickname,
|
||||
prompt_personality="",
|
||||
reason=reason,
|
||||
raw_reply=raw_reply,
|
||||
sender_name=sender,
|
||||
target_message=target,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
chat_target_1 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private1", sender_name=chat_target_name
|
||||
)
|
||||
chat_target_2 = await global_prompt_manager.format_prompt(
|
||||
"chat_target_private2", sender_name=chat_target_name
|
||||
)
|
||||
|
||||
template_name = "default_expressor_prompt"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
template_name,
|
||||
expression_habits_block=expression_habits_block,
|
||||
relation_info_block=relation_info,
|
||||
chat_target=chat_target_1,
|
||||
time_block=time_block,
|
||||
chat_info=chat_talking_prompt_half,
|
||||
identity=indentify_block,
|
||||
chat_target_2=chat_target_2,
|
||||
reply_target_block=reply_target_block,
|
||||
raw_reply=raw_reply,
|
||||
reason=reason,
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
keywords_reaction_prompt=keywords_reaction_prompt,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
)
|
||||
|
||||
return prompt
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ class ReplyerManager:
|
||||
self,
|
||||
chat_stream: Optional[ChatStream] = None,
|
||||
chat_id: Optional[str] = None,
|
||||
enable_tool: bool = False,
|
||||
model_configs: Optional[List[Dict[str, Any]]] = None,
|
||||
request_type: str = "replyer",
|
||||
) -> Optional[DefaultReplyer]:
|
||||
@@ -49,6 +50,7 @@ class ReplyerManager:
|
||||
# model_configs 只在此时(初始化时)生效
|
||||
replyer = DefaultReplyer(
|
||||
chat_stream=target_stream,
|
||||
enable_tool=enable_tool,
|
||||
model_configs=model_configs, # 可以是None,此时使用默认模型
|
||||
request_type=request_type,
|
||||
)
|
||||
|
||||
@@ -174,6 +174,7 @@ def _build_readable_messages_internal(
|
||||
truncate: bool = False,
|
||||
pic_id_mapping: Dict[str, str] = None,
|
||||
pic_counter: int = 1,
|
||||
show_pic: bool = True,
|
||||
) -> Tuple[str, List[Tuple[float, str, str]], Dict[str, str], int]:
|
||||
"""
|
||||
内部辅助函数,构建可读消息字符串和原始消息详情列表。
|
||||
@@ -260,7 +261,8 @@ def _build_readable_messages_internal(
|
||||
content = content.replace("ⁿ", "")
|
||||
|
||||
# 处理图片ID
|
||||
content = process_pic_ids(content)
|
||||
if show_pic:
|
||||
content = process_pic_ids(content)
|
||||
|
||||
# 检查必要信息是否存在
|
||||
if not all([platform, user_id, timestamp is not None]):
|
||||
@@ -532,6 +534,7 @@ def build_readable_messages(
|
||||
read_mark: float = 0.0,
|
||||
truncate: bool = False,
|
||||
show_actions: bool = False,
|
||||
show_pic: bool = True,
|
||||
) -> str:
|
||||
"""
|
||||
将消息列表转换为可读的文本格式。
|
||||
@@ -601,7 +604,7 @@ def build_readable_messages(
|
||||
if read_mark <= 0:
|
||||
# 没有有效的 read_mark,直接格式化所有消息
|
||||
formatted_string, _, pic_id_mapping, _ = _build_readable_messages_internal(
|
||||
copy_messages, replace_bot_name, merge_messages, timestamp_mode, truncate
|
||||
copy_messages, replace_bot_name, merge_messages, timestamp_mode, truncate, show_pic=show_pic
|
||||
)
|
||||
|
||||
# 生成图片映射信息并添加到最前面
|
||||
@@ -628,9 +631,17 @@ def build_readable_messages(
|
||||
truncate,
|
||||
pic_id_mapping,
|
||||
pic_counter,
|
||||
show_pic=show_pic,
|
||||
)
|
||||
formatted_after, _, pic_id_mapping, _ = _build_readable_messages_internal(
|
||||
messages_after_mark, replace_bot_name, merge_messages, timestamp_mode, False, pic_id_mapping, pic_counter
|
||||
messages_after_mark,
|
||||
replace_bot_name,
|
||||
merge_messages,
|
||||
timestamp_mode,
|
||||
False,
|
||||
pic_id_mapping,
|
||||
pic_counter,
|
||||
show_pic=show_pic,
|
||||
)
|
||||
|
||||
read_mark_line = "\n--- 以上消息是你已经看过,请关注以下未读的新消息---\n"
|
||||
|
||||
Reference in New Issue
Block a user