feat:新增记忆唤醒流程
This commit is contained in:
4
.gitignore
vendored
4
.gitignore
vendored
@@ -26,8 +26,8 @@ message_queue_window.bat
|
||||
message_queue_window.txt
|
||||
queue_update.txt
|
||||
memory_graph.gml
|
||||
/src/do_tool/tool_can_use/auto_create_tool.py
|
||||
/src/do_tool/tool_can_use/execute_python_code_tool.py
|
||||
/src/tools/tool_can_use/auto_create_tool.py
|
||||
/src/tools/tool_can_use/execute_python_code_tool.py
|
||||
.env
|
||||
.env.*
|
||||
.cursor
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
MaiCore/MaiBot 0.6路线图 draft
|
||||
|
||||
0.6.3:解决0.6.x版本核心问题,改进功能
|
||||
主要功能加入
|
||||
LPMM全面替代旧知识库
|
||||
采用新的HFC回复模式,取代旧心流
|
||||
合并推理模式和心流模式,根据麦麦自己决策回复模式
|
||||
提供新的表情包系统
|
||||
|
||||
0.6.4:提升用户体验,交互优化
|
||||
加入webui
|
||||
提供麦麦 API
|
||||
修复prompt建构的各种问题
|
||||
修复各种bug
|
||||
调整代码文件结构,重构部分落后设计
|
||||
|
||||
@@ -4,11 +4,10 @@ from src.config.config import global_config
|
||||
from src.plugins.schedule.schedule_generator import bot_schedule
|
||||
from src.common.logger_manager import get_logger
|
||||
from typing import Any, Optional
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.tools.tool_use import ToolUser
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager # Module instance
|
||||
from src.heart_flow.mai_state_manager import MaiStateInfo, MaiStateManager
|
||||
from src.heart_flow.subheartflow_manager import SubHeartflowManager
|
||||
from src.heart_flow.mind import Mind
|
||||
from src.heart_flow.interest_logger import InterestLogger # Import InterestLogger
|
||||
from src.heart_flow.background_tasks import BackgroundTaskManager # Import BackgroundTaskManager
|
||||
|
||||
@@ -45,8 +44,6 @@ class Heartflow:
|
||||
self.tool_user_instance = ToolUser() # 工具使用模块
|
||||
self.relationship_manager_instance = relationship_manager # 关系管理模块
|
||||
|
||||
# 子系统初始化
|
||||
self.mind: Mind = Mind(self.subheartflow_manager, self.llm_model) # 思考管理器
|
||||
self.interest_logger: InterestLogger = InterestLogger(self.subheartflow_manager, self) # 兴趣日志记录器
|
||||
|
||||
# 后台任务管理器 (整合所有定时任务)
|
||||
|
||||
@@ -1,139 +0,0 @@
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.individuality.individuality import Individuality
|
||||
from src.plugins.utils.prompt_builder import global_prompt_manager
|
||||
from src.config.config import global_config
|
||||
|
||||
# Need access to SubHeartflowManager to get minds and update them
|
||||
if TYPE_CHECKING:
|
||||
from src.heart_flow.subheartflow_manager import SubHeartflowManager
|
||||
from src.heart_flow.mai_state_manager import MaiStateInfo
|
||||
|
||||
|
||||
logger = get_logger("sub_heartflow_mind")
|
||||
|
||||
|
||||
class Mind:
|
||||
"""封装 Mai 的思考过程,包括生成内心独白和汇总想法。"""
|
||||
|
||||
def __init__(self, subheartflow_manager: "SubHeartflowManager", llm_model: LLMRequest):
|
||||
self.subheartflow_manager = subheartflow_manager
|
||||
self.llm_model = llm_model
|
||||
self.individuality = Individuality.get_instance()
|
||||
|
||||
async def do_a_thinking(self, current_main_mind: str, mai_state_info: "MaiStateInfo", schedule_info: str):
|
||||
"""
|
||||
执行一次主心流思考过程,生成新的内心独白。
|
||||
|
||||
Args:
|
||||
current_main_mind: 当前的主心流想法。
|
||||
mai_state_info: 当前的 Mai 状态信息 (用于获取 mood)。
|
||||
schedule_info: 当前的日程信息。
|
||||
|
||||
Returns:
|
||||
str: 生成的新的内心独白,如果出错则返回提示信息。
|
||||
"""
|
||||
logger.debug("Mind: 执行思考...")
|
||||
|
||||
# --- 构建 Prompt --- #
|
||||
personality_info = (
|
||||
self.individuality.get_prompt_snippet()
|
||||
if hasattr(self.individuality, "get_prompt_snippet")
|
||||
else self.individuality.personality.personality_core
|
||||
)
|
||||
mood_info = mai_state_info.get_mood_prompt()
|
||||
related_memory_info = "memory" # TODO: Implement memory retrieval
|
||||
|
||||
# Get subflow minds summary via internal method
|
||||
try:
|
||||
sub_flows_info = await self._get_subflows_summary(current_main_mind, mai_state_info)
|
||||
except Exception as e:
|
||||
logger.error(f"[Mind Thinking] 获取子心流想法汇总失败: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
sub_flows_info = "(获取子心流想法时出错)"
|
||||
|
||||
# Format prompt
|
||||
try:
|
||||
prompt = (await global_prompt_manager.get_prompt_async("thinking_prompt")).format(
|
||||
schedule_info=schedule_info,
|
||||
personality_info=personality_info,
|
||||
related_memory_info=related_memory_info,
|
||||
current_thinking_info=current_main_mind, # Use passed current mind
|
||||
sub_flows_info=sub_flows_info,
|
||||
mood_info=mood_info,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[Mind Thinking] 格式化 thinking_prompt 失败: {e}")
|
||||
return "(思考时格式化Prompt出错...)"
|
||||
|
||||
# --- 调用 LLM --- #
|
||||
try:
|
||||
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
|
||||
if not response:
|
||||
logger.warning("[Mind Thinking] 内心独白 LLM 返回空结果。")
|
||||
response = "(暂时没什么想法...)"
|
||||
logger.info(f"Mind: 新想法生成: {response[:100]}...") # Log truncated response
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"[Mind Thinking] 内心独白 LLM 调用失败: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
return "(思考时调用LLM出错...)"
|
||||
|
||||
async def _get_subflows_summary(self, current_main_mind: str, mai_state_info: "MaiStateInfo") -> str:
|
||||
"""获取所有活跃子心流的想法,并使用 LLM 进行汇总。"""
|
||||
# 1. Get active minds from SubHeartflowManager
|
||||
sub_minds_list = self.subheartflow_manager.get_active_subflow_minds()
|
||||
|
||||
if not sub_minds_list:
|
||||
return "(当前没有活跃的子心流想法)"
|
||||
|
||||
minds_str = "\n".join([f"- {mind}" for mind in sub_minds_list])
|
||||
logger.debug(f"Mind: 获取到 {len(sub_minds_list)} 个子心流想法进行汇总。")
|
||||
|
||||
# 2. Call LLM for summary
|
||||
# --- 构建 Prompt --- #
|
||||
personality_info = (
|
||||
self.individuality.get_prompt_snippet()
|
||||
if hasattr(self.individuality, "get_prompt_snippet")
|
||||
else self.individuality.personality.personality_core
|
||||
)
|
||||
mood_info = mai_state_info.get_mood_prompt()
|
||||
bot_name = global_config.BOT_NICKNAME
|
||||
|
||||
try:
|
||||
prompt = (await global_prompt_manager.get_prompt_async("mind_summary_prompt")).format(
|
||||
personality_info=personality_info,
|
||||
bot_name=bot_name,
|
||||
current_mind=current_main_mind, # Use main mind passed for context
|
||||
minds_str=minds_str,
|
||||
mood_info=mood_info,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"[Mind Summary] 格式化 mind_summary_prompt 失败: {e}")
|
||||
return "(汇总想法时格式化Prompt出错...)"
|
||||
|
||||
# --- 调用 LLM --- #
|
||||
try:
|
||||
response, reasoning_content = await self.llm_model.generate_response_async(prompt)
|
||||
if not response:
|
||||
logger.warning("[Mind Summary] 想法汇总 LLM 返回空结果。")
|
||||
return "(想法汇总失败...)"
|
||||
logger.debug(f"Mind: 子想法汇总完成: {response[:100]}...")
|
||||
return response
|
||||
except Exception as e:
|
||||
logger.error(f"[Mind Summary] 想法汇总 LLM 调用失败: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
return "(想法汇总时调用LLM出错...)"
|
||||
|
||||
def update_subflows_with_main_mind(self, main_mind: str):
|
||||
"""触发 SubHeartflowManager 更新所有子心流的主心流信息。"""
|
||||
logger.debug("Mind: 请求更新子心流的主想法信息。")
|
||||
self.subheartflow_manager.update_main_mind_in_subflows(main_mind)
|
||||
|
||||
|
||||
# Note: update_current_mind (managing self.current_mind and self.past_mind)
|
||||
# remains in Heartflow for now, as Heartflow is the central coordinator holding the main state.
|
||||
# Mind class focuses solely on the *process* of thinking and summarizing.
|
||||
@@ -13,14 +13,31 @@ from src.plugins.utils.prompt_builder import global_prompt_manager
|
||||
from typing import Optional
|
||||
import difflib
|
||||
from src.plugins.chat.message import MessageRecv # 添加 MessageRecv 导入
|
||||
from heart_flow.observation.observation import Observation
|
||||
from src.heart_flow.observation.observation import Observation
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.heart_flow.utils_chat import get_chat_type_and_target_info
|
||||
from src.plugins.utils.prompt_builder import Prompt
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
Prompt(
|
||||
"""这是qq群聊的聊天记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请用一句话概括,包括人物、事件和主要信息,不要分点。""",
|
||||
"chat_summary_group_prompt", # Template for group chat
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""这是你和{chat_target}的私聊记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请用一句话概括,包括事件,时间,和主要信息,不要分点。""",
|
||||
"chat_summary_private_prompt", # Template for private chat
|
||||
)
|
||||
# --- End Prompt Template Definition ---
|
||||
|
||||
|
||||
# 聊天观察
|
||||
class ChattingObservation(Observation):
|
||||
def __init__(self, chat_id):
|
||||
@@ -120,8 +137,6 @@ class ChattingObservation(Observation):
|
||||
content_format = ""
|
||||
accept_format = ""
|
||||
template_items = {}
|
||||
template_name = {}
|
||||
template_default = True
|
||||
|
||||
format_info = {"content_format": content_format, "accept_format": accept_format}
|
||||
template_info = {
|
||||
@@ -134,8 +149,6 @@ class ChattingObservation(Observation):
|
||||
"time": find_msg.get("time"),
|
||||
"group_info": group_info,
|
||||
"user_info": user_info,
|
||||
"format_info": find_msg.get("format_info"),
|
||||
"template_info": find_msg.get("template_info"),
|
||||
"additional_config": find_msg.get("additional_config"),
|
||||
"format_info": format_info,
|
||||
"template_info": template_info,
|
||||
|
||||
@@ -2,28 +2,9 @@
|
||||
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
|
||||
from datetime import datetime
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.plugins.utils.prompt_builder import Prompt
|
||||
|
||||
# Import the new utility function
|
||||
|
||||
logger = get_logger("observation")
|
||||
|
||||
# --- Define Prompt Templates for Chat Summary ---
|
||||
Prompt(
|
||||
"""这是qq群聊的聊天记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请用一句话概括,包括人物、事件和主要信息,不要分点。""",
|
||||
"chat_summary_group_prompt", # Template for group chat
|
||||
)
|
||||
|
||||
Prompt(
|
||||
"""这是你和{chat_target}的私聊记录,请总结以下聊天记录的主题:
|
||||
{chat_logs}
|
||||
请用一句话概括,包括事件,时间,和主要信息,不要分点。""",
|
||||
"chat_summary_private_prompt", # Template for private chat
|
||||
)
|
||||
# --- End Prompt Template Definition ---
|
||||
|
||||
|
||||
# 所有观察的基类
|
||||
class Observation:
|
||||
|
||||
@@ -14,7 +14,7 @@ from src.plugins.chat.chat_stream import chat_manager
|
||||
# 导入心流相关类
|
||||
from src.heart_flow.sub_heartflow import SubHeartflow, ChatState
|
||||
from src.heart_flow.mai_state_manager import MaiStateInfo
|
||||
from src.heart_flow.chatting_observation import ChattingObservation
|
||||
from src.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
|
||||
# 导入LLM请求工具
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
|
||||
@@ -1,171 +0,0 @@
|
||||
from .observation.observation import ChattingObservation
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.individuality.individuality import Individuality
|
||||
from ..plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.plugins.utils.json_utils import process_llm_tool_calls
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||
from src.heart_flow.sub_mind import SubMind
|
||||
|
||||
logger = get_logger("tool_use")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
# ... 原有代码 ...
|
||||
|
||||
# 添加工具执行器提示词
|
||||
tool_executor_prompt = """
|
||||
你是一个专门执行工具的助手。你的名字是{bot_name}。现在是{time_now}。
|
||||
|
||||
你要在群聊中扮演以下角色:
|
||||
{prompt_personality}
|
||||
|
||||
你当前的额外信息:
|
||||
{extra_info}
|
||||
|
||||
你的心情是:{mood_info}
|
||||
|
||||
{relation_prompt}
|
||||
|
||||
群里正在进行的聊天内容:
|
||||
{chat_observe_info}
|
||||
|
||||
请仔细分析聊天内容,考虑以下几点:
|
||||
1. 内容中是否包含需要查询信息的问题
|
||||
2. 是否需要执行特定操作
|
||||
3. 是否有明确的工具使用指令
|
||||
4. 考虑用户与你的关系以及当前的对话氛围
|
||||
|
||||
如果需要使用工具,请直接调用相应的工具函数。如果不需要使用工具,请简单输出"无需使用工具"。
|
||||
尽量只在确实必要时才使用工具。
|
||||
"""
|
||||
Prompt(tool_executor_prompt, "tool_executor_prompt")
|
||||
|
||||
|
||||
class ToolExecutor:
|
||||
def __init__(self, subheartflow_id: str):
|
||||
self.subheartflow_id = subheartflow_id
|
||||
self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
|
||||
self.llm_model = LLMRequest(
|
||||
model=global_config.llm_summary, # 为工具执行器配置单独的模型
|
||||
# temperature=global_config.llm_summary["temp"],
|
||||
# max_tokens=800,
|
||||
request_type="tool_execution",
|
||||
)
|
||||
self.structured_info = []
|
||||
|
||||
async def execute_tools(
|
||||
self, sub_mind: SubMind, chat_target_name="对方", is_group_chat=False, return_details=False, cycle_info=None
|
||||
):
|
||||
"""
|
||||
并行执行工具,返回结构化信息
|
||||
|
||||
参数:
|
||||
sub_mind: 子思维对象
|
||||
chat_target_name: 聊天目标名称,默认为"对方"
|
||||
is_group_chat: 是否为群聊,默认为False
|
||||
return_details: 是否返回详细信息,默认为False
|
||||
cycle_info: 循环信息对象,可用于记录详细执行信息
|
||||
|
||||
返回:
|
||||
如果return_details为False:
|
||||
List[Dict]: 工具执行结果的结构化信息列表
|
||||
如果return_details为True:
|
||||
Tuple[List[Dict], List[str], str]: (工具执行结果列表, 使用的工具列表, 工具执行提示词)
|
||||
"""
|
||||
# 初始化工具
|
||||
tool_instance = ToolUser()
|
||||
tools = tool_instance._define_tools()
|
||||
|
||||
observation: ChattingObservation = sub_mind.observations[0] if sub_mind.observations else None
|
||||
|
||||
# 获取观察内容
|
||||
chat_observe_info = observation.get_observe_info()
|
||||
person_list = observation.person_list
|
||||
|
||||
# extra structured info
|
||||
extra_structured_info = sub_mind.structured_info_str
|
||||
|
||||
# 构建关系信息
|
||||
relation_prompt = "【关系信息】\n"
|
||||
for person in person_list:
|
||||
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
|
||||
|
||||
# 获取个性信息
|
||||
individuality = Individuality.get_instance()
|
||||
prompt_personality = individuality.get_prompt(x_person=2, level=2)
|
||||
|
||||
# 获取心情信息
|
||||
mood_info = observation.chat_state.mood if hasattr(observation, "chat_state") else ""
|
||||
|
||||
# 获取时间信息
|
||||
time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
|
||||
|
||||
# 构建专用于工具调用的提示词
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"tool_executor_prompt",
|
||||
extra_info=extra_structured_info,
|
||||
chat_observe_info=chat_observe_info,
|
||||
chat_target_name=chat_target_name,
|
||||
is_group_chat=is_group_chat,
|
||||
relation_prompt=relation_prompt,
|
||||
prompt_personality=prompt_personality,
|
||||
mood_info=mood_info,
|
||||
bot_name=individuality.name,
|
||||
time_now=time_now,
|
||||
)
|
||||
|
||||
# 如果指定了cycle_info,记录工具执行的prompt
|
||||
if cycle_info:
|
||||
cycle_info.set_tooluse_info(prompt=prompt)
|
||||
|
||||
# 调用LLM,专注于工具使用
|
||||
logger.info(f"开始执行工具调用{prompt}")
|
||||
response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
|
||||
|
||||
logger.debug(f"获取到工具原始输出:\n{tool_calls}")
|
||||
# 处理工具调用和结果收集,类似于SubMind中的逻辑
|
||||
new_structured_items = []
|
||||
used_tools = [] # 记录使用了哪些工具
|
||||
|
||||
if tool_calls:
|
||||
success, valid_tool_calls, error_msg = process_llm_tool_calls(tool_calls)
|
||||
if success and valid_tool_calls:
|
||||
for tool_call in valid_tool_calls:
|
||||
try:
|
||||
# 记录使用的工具名称
|
||||
tool_name = tool_call.get("name", "unknown_tool")
|
||||
used_tools.append(tool_name)
|
||||
|
||||
result = await tool_instance._execute_tool_call(tool_call)
|
||||
|
||||
name = result.get("type", "unknown_type")
|
||||
content = result.get("content", "")
|
||||
|
||||
logger.info(f"工具{name},获得信息:{content}")
|
||||
if result:
|
||||
new_item = {
|
||||
"type": result.get("type", "unknown_type"),
|
||||
"id": result.get("id", f"tool_exec_{time.time()}"),
|
||||
"content": result.get("content", ""),
|
||||
"ttl": 3,
|
||||
}
|
||||
new_structured_items.append(new_item)
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix}工具执行失败: {e}")
|
||||
|
||||
# 如果指定了cycle_info,记录工具执行结果
|
||||
if cycle_info:
|
||||
cycle_info.set_tooluse_info(tools_used=used_tools, tool_results=new_structured_items)
|
||||
|
||||
# 根据return_details决定返回值
|
||||
if return_details:
|
||||
return new_structured_items, used_tools, prompt
|
||||
else:
|
||||
return new_structured_items
|
||||
|
||||
|
||||
init_prompt()
|
||||
@@ -129,7 +129,8 @@ class CycleAnalyzer:
|
||||
try:
|
||||
duration = float(line[3:].strip().split("秒")[0])
|
||||
total_duration += duration
|
||||
except:
|
||||
except Exception as e:
|
||||
logger.error(f"解析耗时时出错: {e}")
|
||||
pass
|
||||
|
||||
# 解析工具使用
|
||||
@@ -174,7 +175,8 @@ class CycleAnalyzer:
|
||||
timestamp_str = filename.split("_", 2)[2].split(".")[0]
|
||||
timestamp = time.mktime(time.strptime(timestamp_str, "%Y%m%d_%H%M%S"))
|
||||
all_cycles.append((timestamp, stream_id, filepath))
|
||||
except:
|
||||
except Exception as e:
|
||||
logger.error(f"从文件名中提取时间戳时出错: {e}")
|
||||
continue
|
||||
|
||||
# 按时间戳排序,取最新的count个
|
||||
|
||||
@@ -130,9 +130,6 @@ def main():
|
||||
parser = argparse.ArgumentParser(description="HeartFC循环信息查看工具")
|
||||
subparsers = parser.add_subparsers(dest="command", help="子命令")
|
||||
|
||||
# 列出所有聊天流
|
||||
list_streams_parser = subparsers.add_parser("list-streams", help="列出所有聊天流")
|
||||
|
||||
# 分析聊天流
|
||||
analyze_parser = subparsers.add_parser("analyze", help="分析指定聊天流的循环信息")
|
||||
analyze_parser.add_argument("stream_id", help="聊天流ID")
|
||||
|
||||
@@ -168,9 +168,6 @@ class CycleDetail:
|
||||
filename = f"cycle_{cycle_info.cycle_id}_{timestamp}.txt"
|
||||
filepath = os.path.join(stream_dir, filename)
|
||||
|
||||
# 将CycleInfo转换为JSON格式
|
||||
cycle_data = cycle_info.to_dict()
|
||||
|
||||
# 格式化输出成易读的格式
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
# 写入基本信息
|
||||
|
||||
@@ -31,6 +31,7 @@ from src.heart_flow.observation.working_observation import WorkingObservation
|
||||
from src.plugins.heartFC_chat.info_processors.tool_processor import ToolProcessor
|
||||
from src.plugins.heartFC_chat.expressors.default_expressor import DefaultExpressor
|
||||
from src.plugins.heartFC_chat.hfc_utils import _create_empty_anchor_message
|
||||
from src.plugins.heartFC_chat.memory_activator import MemoryActivator
|
||||
|
||||
install(extra_lines=3)
|
||||
|
||||
@@ -115,39 +116,6 @@ class ActionManager:
|
||||
self._original_actions_backup = None
|
||||
# logger.debug("恢复了原始动作集") # 可选日志
|
||||
|
||||
def clear_actions(self):
|
||||
"""清空所有动作"""
|
||||
self._available_actions.clear()
|
||||
|
||||
def reset_to_default(self):
|
||||
"""重置为默认动作集"""
|
||||
self._available_actions = DEFAULT_ACTIONS.copy()
|
||||
|
||||
|
||||
# 在文件开头添加自定义异常类
|
||||
class HeartFCError(Exception):
|
||||
"""麦麦聊天系统基础异常类"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class PlannerError(HeartFCError):
|
||||
"""规划器异常"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ReplierError(HeartFCError):
|
||||
"""回复器异常"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class SenderError(HeartFCError):
|
||||
"""发送器异常"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
async def _handle_cycle_delay(action_taken_this_cycle: bool, cycle_start_time: float, log_prefix: str):
|
||||
"""处理循环延迟"""
|
||||
@@ -202,6 +170,7 @@ class HeartFChatting:
|
||||
self.hfcloop_observation = HFCloopObservation(observe_id=self.stream_id)
|
||||
self.tool_processor = ToolProcessor(subheartflow_id=self.stream_id)
|
||||
self.working_observation = WorkingObservation(observe_id=self.stream_id)
|
||||
self.memory_activator = MemoryActivator()
|
||||
|
||||
# 日志前缀
|
||||
self.log_prefix: str = str(chat_id) # Initial default, will be updated
|
||||
@@ -439,6 +408,8 @@ class HeartFChatting:
|
||||
for observation in observations:
|
||||
logger.debug(f"{self.log_prefix} 观察信息: {observation}")
|
||||
|
||||
running_memorys = await self.memory_activator.activate_memory(observations)
|
||||
|
||||
# 记录并行任务开始时间
|
||||
parallel_start_time = time.time()
|
||||
logger.debug(f"{self.log_prefix} 开始信息处理器并行任务")
|
||||
@@ -446,16 +417,16 @@ class HeartFChatting:
|
||||
# 并行执行两个任务:思考和工具执行
|
||||
with Timer("执行 信息处理器", cycle_timers):
|
||||
# 1. 子思维思考 - 不执行工具调用
|
||||
think_task = asyncio.create_task(self.mind_processor.process_info(observations=observations))
|
||||
think_task = asyncio.create_task(self.mind_processor.process_info(observations=observations,running_memorys=running_memorys))
|
||||
logger.debug(f"{self.log_prefix} 启动子思维思考任务")
|
||||
|
||||
# 2. 工具执行器 - 专门处理工具调用
|
||||
tool_task = asyncio.create_task(self.tool_processor.process_info(observations=observations))
|
||||
tool_task = asyncio.create_task(self.tool_processor.process_info(observations=observations,running_memorys=running_memorys))
|
||||
logger.debug(f"{self.log_prefix} 启动工具执行任务")
|
||||
|
||||
# 3. 聊天信息处理器
|
||||
chatting_info_task = asyncio.create_task(
|
||||
self.chatting_info_processor.process_info(observations=observations)
|
||||
self.chatting_info_processor.process_info(observations=observations,running_memorys=running_memorys)
|
||||
)
|
||||
logger.debug(f"{self.log_prefix} 启动聊天信息处理器任务")
|
||||
|
||||
@@ -578,7 +549,7 @@ class HeartFChatting:
|
||||
return await handler(reasoning, action_data, cycle_timers)
|
||||
else: # no_reply
|
||||
return await handler(reasoning, planner_start_db_time, cycle_timers), ""
|
||||
except HeartFCError as e:
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
|
||||
# 出错时也重置计数器
|
||||
self._lian_xu_bu_hui_fu_ci_shu = 0
|
||||
@@ -738,12 +709,6 @@ class HeartFChatting:
|
||||
history = history[-last_n:]
|
||||
return [cycle.to_dict() for cycle in history]
|
||||
|
||||
def get_last_cycle_info(self) -> Optional[Dict[str, Any]]:
|
||||
"""获取最近一个循环的信息"""
|
||||
if self._cycle_history:
|
||||
return self._cycle_history[-1].to_dict()
|
||||
return None
|
||||
|
||||
async def _planner(self, all_plan_info: List[InfoBase], cycle_timers: dict) -> Dict[str, Any]:
|
||||
"""
|
||||
规划器 (Planner): 使用LLM根据上下文决定是否和如何回复。
|
||||
@@ -995,10 +960,7 @@ class HeartFChatting:
|
||||
reply_text += reply
|
||||
|
||||
self._current_cycle.set_response_info(
|
||||
success=success,
|
||||
reply_text=reply_text,
|
||||
anchor_message=anchor_message,
|
||||
response_text=reply_text,
|
||||
)
|
||||
|
||||
return success, reply_text
|
||||
|
||||
|
||||
@@ -848,7 +848,6 @@ class PromptBuilder:
|
||||
for name in action_keys:
|
||||
desc = current_available_actions[name]
|
||||
action_options_text += f"- '{name}': {desc}\n"
|
||||
example_action_key = action_keys[0] if action_keys else "no_reply"
|
||||
|
||||
planner_prompt_template = await global_prompt_manager.get_prompt_async("planner_prompt")
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ from src.plugins.chat.message import MessageRecv, BaseMessageInfo
|
||||
from src.plugins.chat.chat_stream import ChatStream
|
||||
from src.plugins.chat.message import UserInfo
|
||||
from src.common.logger_manager import get_logger
|
||||
import json
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
@@ -42,3 +43,22 @@ async def _create_empty_anchor_message(
|
||||
logger.error(f"Error getting/creating anchor message: {e}")
|
||||
logger.error(traceback.format_exc())
|
||||
return None
|
||||
|
||||
|
||||
def get_keywords_from_json(json_str: str) -> list[str]:
|
||||
# 提取JSON内容
|
||||
start = json_str.find("{")
|
||||
end = json_str.rfind("}") + 1
|
||||
if start == -1 or end == 0:
|
||||
logger.error("未找到有效的JSON内容")
|
||||
return []
|
||||
|
||||
json_content = json_str[start:end]
|
||||
|
||||
# 解析JSON
|
||||
try:
|
||||
json_data = json.loads(json_content)
|
||||
return json_data.get("keywords", [])
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"JSON解析失败: {e}")
|
||||
return []
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Any, Optional
|
||||
from typing import List, Any, Optional, Dict
|
||||
from src.heart_flow.info.info_base import InfoBase
|
||||
from src.heart_flow.chatting_observation import Observation
|
||||
from src.heart_flow.observation.observation import Observation
|
||||
from src.common.logger_manager import get_logger
|
||||
|
||||
logger = get_logger("base_processor")
|
||||
@@ -21,7 +21,7 @@ class BaseProcessor(ABC):
|
||||
|
||||
@abstractmethod
|
||||
async def process_info(
|
||||
self, infos: List[InfoBase], observations: Optional[List[Observation]] = None, **kwargs: Any
|
||||
self, infos: List[InfoBase], observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, **kwargs: Any
|
||||
) -> List[InfoBase]:
|
||||
"""处理信息对象的抽象方法
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ from src.common.logger_manager import get_logger
|
||||
from src.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.heart_flow.observation.hfcloop_observation import HFCloopObservation
|
||||
from src.heart_flow.info.cycle_info import CycleInfo
|
||||
|
||||
from typing import Dict
|
||||
logger = get_logger("observation")
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ class ChattingInfoProcessor(BaseProcessor):
|
||||
"""初始化观察处理器"""
|
||||
super().__init__()
|
||||
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, **kwargs: Any) -> List[InfoBase]:
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, **kwargs: Any) -> List[InfoBase]:
|
||||
"""处理Observation对象
|
||||
|
||||
Args:
|
||||
|
||||
@@ -22,6 +22,7 @@ from src.plugins.heartFC_chat.info_processors.processor_utils import (
|
||||
calculate_replacement_probability,
|
||||
get_spark,
|
||||
)
|
||||
from typing import Dict
|
||||
|
||||
logger = get_logger("sub_heartflow")
|
||||
|
||||
@@ -29,6 +30,7 @@ logger = get_logger("sub_heartflow")
|
||||
def init_prompt():
|
||||
# --- Group Chat Prompt ---
|
||||
group_prompt = """
|
||||
{memory_str}
|
||||
{extra_info}
|
||||
{relation_prompt}
|
||||
你的名字是{bot_name},{prompt_personality},你现在{mood_info}
|
||||
@@ -50,6 +52,7 @@ def init_prompt():
|
||||
|
||||
# --- Private Chat Prompt ---
|
||||
private_prompt = """
|
||||
{memory_str}
|
||||
{extra_info}
|
||||
{relation_prompt}
|
||||
你的名字是{bot_name},{prompt_personality},你现在{mood_info}
|
||||
@@ -121,7 +124,7 @@ class MindProcessor(BaseProcessor):
|
||||
self.structured_info_str = "\n".join(lines)
|
||||
logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")
|
||||
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]:
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos) -> List[dict]:
|
||||
"""处理信息对象
|
||||
|
||||
Args:
|
||||
@@ -130,14 +133,14 @@ class MindProcessor(BaseProcessor):
|
||||
Returns:
|
||||
List[dict]: 处理后的结构化信息列表
|
||||
"""
|
||||
current_mind = await self.do_thinking_before_reply(observations)
|
||||
current_mind = await self.do_thinking_before_reply(observations,running_memorys)
|
||||
|
||||
mind_info = MindInfo()
|
||||
mind_info.set_current_mind(current_mind)
|
||||
|
||||
return [mind_info]
|
||||
|
||||
async def do_thinking_before_reply(self, observations: Optional[List[Observation]] = None):
|
||||
async def do_thinking_before_reply(self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None):
|
||||
"""
|
||||
在回复前进行思考,生成内心想法并收集工具调用结果
|
||||
|
||||
@@ -166,6 +169,12 @@ class MindProcessor(BaseProcessor):
|
||||
f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}"
|
||||
)
|
||||
|
||||
memory_str = ""
|
||||
if running_memorys:
|
||||
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
|
||||
for running_memory in running_memorys:
|
||||
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
|
||||
|
||||
# ---------- 1. 准备基础数据 ----------
|
||||
# 获取现有想法和情绪状态
|
||||
previous_mind = self.current_mind if self.current_mind else ""
|
||||
@@ -210,6 +219,7 @@ class MindProcessor(BaseProcessor):
|
||||
logger.debug(f"{self.log_prefix} 使用{'群聊' if is_group_chat else '私聊'}思考模板")
|
||||
|
||||
prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
|
||||
memory_str=memory_str,
|
||||
extra_info=self.structured_info_str,
|
||||
prompt_personality=prompt_personality,
|
||||
relation_prompt=relation_prompt,
|
||||
|
||||
@@ -5,11 +5,11 @@ import time
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.individuality.individuality import Individuality
|
||||
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.tools.tool_use import ToolUser
|
||||
from src.plugins.utils.json_utils import process_llm_tool_calls
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||
from .base_processor import BaseProcessor
|
||||
from typing import List, Optional
|
||||
from typing import List, Optional, Dict
|
||||
from src.heart_flow.observation.observation import Observation
|
||||
from src.heart_flow.observation.working_observation import WorkingObservation
|
||||
from src.heart_flow.info.structured_info import StructuredInfo
|
||||
@@ -30,6 +30,8 @@ def init_prompt():
|
||||
你当前的额外信息:
|
||||
{extra_info}
|
||||
|
||||
{memory_str}
|
||||
|
||||
你的心情是:{mood_info}
|
||||
|
||||
{relation_prompt}
|
||||
@@ -61,7 +63,7 @@ class ToolProcessor(BaseProcessor):
|
||||
)
|
||||
self.structured_info = []
|
||||
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, *infos) -> List[dict]:
|
||||
async def process_info(self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos) -> List[dict]:
|
||||
"""处理信息对象
|
||||
|
||||
Args:
|
||||
@@ -74,7 +76,7 @@ class ToolProcessor(BaseProcessor):
|
||||
if observations:
|
||||
for observation in observations:
|
||||
if isinstance(observation, ChattingObservation):
|
||||
result, used_tools, prompt = await self.execute_tools(observation)
|
||||
result, used_tools, prompt = await self.execute_tools(observation, running_memorys)
|
||||
|
||||
# 更新WorkingObservation中的结构化信息
|
||||
for observation in observations:
|
||||
@@ -92,7 +94,7 @@ class ToolProcessor(BaseProcessor):
|
||||
|
||||
return [structured_info]
|
||||
|
||||
async def execute_tools(self, observation: ChattingObservation):
|
||||
async def execute_tools(self, observation: ChattingObservation, running_memorys: Optional[List[Dict]] = None):
|
||||
"""
|
||||
并行执行工具,返回结构化信息
|
||||
|
||||
@@ -112,24 +114,22 @@ class ToolProcessor(BaseProcessor):
|
||||
tool_instance = ToolUser()
|
||||
tools = tool_instance._define_tools()
|
||||
|
||||
logger.debug(f"observation: {observation}")
|
||||
logger.debug(f"observation.chat_target_info: {observation.chat_target_info}")
|
||||
logger.debug(f"observation.is_group_chat: {observation.is_group_chat}")
|
||||
logger.debug(f"observation.person_list: {observation.person_list}")
|
||||
# logger.debug(f"observation: {observation}")
|
||||
# logger.debug(f"observation.chat_target_info: {observation.chat_target_info}")
|
||||
# logger.debug(f"observation.is_group_chat: {observation.is_group_chat}")
|
||||
# logger.debug(f"observation.person_list: {observation.person_list}")
|
||||
|
||||
is_group_chat = observation.is_group_chat
|
||||
if not is_group_chat:
|
||||
chat_target_name = (
|
||||
observation.chat_target_info.get("person_name")
|
||||
or observation.chat_target_info.get("user_nickname")
|
||||
or "对方"
|
||||
)
|
||||
else:
|
||||
chat_target_name = "群聊"
|
||||
|
||||
chat_observe_info = observation.get_observe_info()
|
||||
person_list = observation.person_list
|
||||
|
||||
memory_str = ""
|
||||
if running_memorys:
|
||||
memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
|
||||
for running_memory in running_memorys:
|
||||
memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
|
||||
|
||||
# 构建关系信息
|
||||
relation_prompt = "【关系信息】\n"
|
||||
for person in person_list:
|
||||
@@ -148,6 +148,7 @@ class ToolProcessor(BaseProcessor):
|
||||
# 构建专用于工具调用的提示词
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"tool_executor_prompt",
|
||||
memory_str=memory_str,
|
||||
extra_info="extra_structured_info",
|
||||
chat_observe_info=chat_observe_info,
|
||||
# chat_target_name=chat_target_name,
|
||||
|
||||
70
src/plugins/heartFC_chat/memory_activator.py
Normal file
70
src/plugins/heartFC_chat/memory_activator.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from src.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
from src.heart_flow.observation.working_observation import WorkingObservation
|
||||
from src.heart_flow.observation.hfcloop_observation import HFCloopObservation
|
||||
from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.plugins.heartFC_chat.hfc_utils import get_keywords_from_json
|
||||
from datetime import datetime
|
||||
from src.plugins.memory_system.Hippocampus import HippocampusManager
|
||||
from typing import List, Dict
|
||||
|
||||
|
||||
logger = get_logger("memory_activator")
|
||||
|
||||
# Register the prompt used to extract memory-recall trigger keywords from the
# current chat context.
# Fixes vs. original:
#  - the literal JSON braces in the example output are escaped as {{ }} so that
#    format_prompt's str.format-style substitution of {obs_info_text} does not
#    raise "Single '{' encountered" on the template (format_prompt is used with
#    keyword substitution elsewhere in this codebase).
#  - typo in the first line: "进行会议" (hold a meeting) -> "进行回忆" (recall).
Prompt(
    """
你是一个记忆分析器,你需要根据以下信息来进行回忆
以下是一场聊天中的信息,请根据这些信息,总结出几个关键词作为记忆回忆的触发词

{obs_info_text}

请输出一个json格式,包含以下字段:
{{
    "keywords": ["关键词1", "关键词2", "关键词3",......]
}}
不要输出其他多余内容,只输出json格式就好
""",
    "memory_activator_prompt",
)
|
||||
|
||||
|
||||
class MemoryActivator:
    """Recalls long-term memories relevant to the ongoing chat.

    Summarizes the current observations into trigger keywords via an LLM,
    queries the Hippocampus memory system with those keywords, and keeps the
    retrieved memories in ``running_memory`` for later prompt construction.
    """

    def __init__(self):
        # NOTE(review): attribute name "summart_model" is a typo for
        # "summary_model" — kept as-is for compatibility with any external
        # readers of this attribute.
        self.summart_model = LLMRequest(
            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
        )
        # Memories recalled so far in this chat session; each entry is
        # {"topic": str, "content": str, "timestamp": ISO-8601 str}.
        # NOTE(review): this list grows without bound and may accumulate
        # duplicate topics across repeated calls — consider deduplication or a
        # size cap.
        self.running_memory = []

    async def activate_memory(self, observations) -> List[Dict]:
        """Recall memories triggered by the given observations.

        Args:
            observations: iterable of observation objects (Chatting/Working/
                HFCloop observations); other types are silently ignored.

        Returns:
            The accumulated ``running_memory`` list (all memories recalled so
            far, including ones added by this call).
        """
        # Flatten every observation into one text blob for the keyword prompt.
        obs_info_text = ""
        for observation in observations:
            if isinstance(observation, ChattingObservation):
                obs_info_text += observation.get_observe_info()
            elif isinstance(observation, WorkingObservation):
                working_info = observation.get_observe_info()
                for working_info_item in working_info:
                    obs_info_text += f"{working_info_item['type']}: {working_info_item['content']}\n"
            elif isinstance(observation, HFCloopObservation):
                obs_info_text += observation.get_observe_info()

        # BUGFIX: format_prompt is a coroutine (it is awaited everywhere else
        # in this codebase) — the original passed the un-awaited coroutine
        # object on as the prompt.
        prompt = await global_prompt_manager.format_prompt("memory_activator_prompt", obs_info_text=obs_info_text)

        # BUGFIX: generate_response is an async LLM call and must be awaited.
        # NOTE(review): LLMRequest.generate_response may return a tuple of
        # (content, reasoning, ...) — confirm get_keywords_from_json handles
        # the actual return shape.
        response = await self.summart_model.generate_response(prompt)

        keywords = get_keywords_from_json(response)

        # Query the memory system with the extracted trigger keywords.
        related_memory = await HippocampusManager.get_instance().get_memory_from_topic(
            valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
        )

        logger.debug(f"获取到的记忆: {related_memory}")

        if related_memory:
            for topic, memory in related_memory:
                self.running_memory.append({"topic": topic, "content": memory, "timestamp": datetime.now().isoformat()})
                logger.debug(f"添加新记忆: {topic} - {memory}")

        return self.running_memory
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.config.config import global_config
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.plugins.moods.moods import MoodManager
|
||||
@@ -1,6 +1,6 @@
|
||||
from typing import Any
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
|
||||
|
||||
logger = get_logger("relationship_tool")
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.plugins.schedule.schedule_generator import bot_schedule
|
||||
from src.common.logger import get_module_logger
|
||||
from typing import Any
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.plugins.memory_system.Hippocampus import HippocampusManager
|
||||
from src.common.logger import get_module_logger
|
||||
from typing import Dict, Any
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.common.logger import get_module_logger
|
||||
from typing import Any
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.common.logger import get_module_logger
|
||||
|
||||
from typing import Any
|
||||
@@ -9,7 +9,7 @@
|
||||
每个工具应该继承 `BaseTool` 基类并实现必要的属性和方法:
|
||||
|
||||
```python
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool, register_tool
|
||||
|
||||
class MyNewTool(BaseTool):
|
||||
# 工具名称,必须唯一
|
||||
@@ -86,7 +86,7 @@ register_tool(MyNewTool)
|
||||
## 使用示例
|
||||
|
||||
```python
|
||||
from src.do_tool.tool_use import ToolUser
|
||||
from src.tools.tool_use import ToolUser
|
||||
|
||||
# 创建工具用户
|
||||
tool_user = ToolUser()
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import (
|
||||
from src.tools.tool_can_use.base_tool import (
|
||||
BaseTool,
|
||||
register_tool,
|
||||
discover_tools,
|
||||
@@ -81,7 +81,7 @@ def discover_tools():
|
||||
continue
|
||||
|
||||
# 导入模块
|
||||
module = importlib.import_module(f"src.do_tool.{package_name}.{module_name}")
|
||||
module = importlib.import_module(f"src.tools.{package_name}.{module_name}")
|
||||
|
||||
# 查找模块中的工具类
|
||||
for _, obj in inspect.getmembers(module):
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.common.logger import get_module_logger
|
||||
from typing import Any
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.plugins.chat.utils import get_embedding
|
||||
from src.common.database import db
|
||||
from src.common.logger_manager import get_logger
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.common.logger_manager import get_logger
|
||||
from typing import Dict, Any
|
||||
from datetime import datetime
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool
|
||||
from src.plugins.chat.utils import get_embedding
|
||||
|
||||
# from src.common.database import db
|
||||
@@ -1,4 +1,4 @@
|
||||
from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
|
||||
from src.tools.tool_can_use.base_tool import BaseTool, register_tool
|
||||
from src.plugins.person_info.person_info import person_info_manager
|
||||
from src.common.logger_manager import get_logger
|
||||
import time
|
||||
@@ -2,12 +2,12 @@ from src.plugins.models.utils_model import LLMRequest
|
||||
from src.config.config import global_config
|
||||
import json
|
||||
from src.common.logger_manager import get_logger
|
||||
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
|
||||
from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance
|
||||
import traceback
|
||||
from src.plugins.person_info.relationship_manager import relationship_manager
|
||||
from src.plugins.chat.utils import parse_text_timestamps
|
||||
from src.plugins.chat.chat_stream import ChatStream
|
||||
from src.heart_flow.chatting_observation import ChattingObservation
|
||||
from src.heart_flow.observation.chatting_observation import ChattingObservation
|
||||
|
||||
logger = get_logger("tool_use")
|
||||
|
||||
Reference in New Issue
Block a user