feat: add per-processor HFC toggles and a think-interval setting
Adds on/off options for the individual HFC (HeartFChatting) processors, adds an adjustable think interval, and removes unused tools.
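Taken together, the changes below add three boolean switches and an integer interval to the focus-chat config, and make the HeartFChatting loop sleep between cycles. The following is only an illustrative sketch of how such switches are typically consumed; FocusChatSettings, enabled_processors, focus_chat_loop, and the processor name strings are stand-ins, not code from this commit:

import asyncio
from dataclasses import dataclass


@dataclass
class FocusChatSettings:
    """Stand-in for the new FocusChatConfig fields added in this commit."""
    think_interval: int = 1
    self_identify_processor: bool = True
    tool_use_processor: bool = True
    working_memory_processor: bool = True


def enabled_processors(cfg: FocusChatSettings) -> list[str]:
    """Pick processor names according to the new on/off switches (names are placeholders)."""
    names = []
    if cfg.self_identify_processor:
        names.append("SelfProcessor")
    if cfg.tool_use_processor:
        names.append("ToolProcessor")
    if cfg.working_memory_processor:
        names.append("WorkingMemory")
    return names


async def focus_chat_loop(cfg: FocusChatSettings, run_cycle) -> None:
    """Run one think cycle, then wait think_interval seconds before the next."""
    while True:
        await run_cycle(enabled_processors(cfg))
        await asyncio.sleep(cfg.think_interval)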
@@ -25,6 +25,7 @@ from src.chat.focus_chat.info_processors.self_processor import SelfProcessor
from src.chat.focus_chat.planners.planner import ActionPlanner
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.working_memory.working_memory import WorkingMemory
from src.config.config import global_config

install(extra_lines=3)

@@ -257,6 +258,8 @@ class HeartFChatting:
                f"动作: {self._current_cycle.loop_plan_info['action_result']['action_type']}"
                + (f"\n详情: {'; '.join(timer_strings)}" if timer_strings else "")
            )

            await asyncio.sleep(global_config.focus_chat.think_interval)

        except asyncio.CancelledError:
            # 设置了关闭标志位后被取消是正常流程

@@ -86,7 +86,6 @@ class MaiStateManager:
        current_time = time.time()
        current_status = current_state_info.mai_status
        time_in_current_status = current_time - current_state_info.last_status_change_time
        _time_since_last_min_check = current_time - current_state_info.last_min_check_time
        next_state: Optional[MaiState] = None

        def _resolve_offline(candidate_state: MaiState) -> MaiState:

@@ -38,10 +38,8 @@ class NormalChat:
        # Interest dict
        self.interest_dict = interest_dict or {}

        # --- Initialize attributes (defaults) ---
        self.is_group_chat: bool = False
        self.chat_target_info: Optional[dict] = None
        # --- End Initialization ---

        # Other sync initializations
        self.gpt = NormalChatGenerator()

@@ -51,8 +49,7 @@ class NormalChat:
        self._chat_task: Optional[asyncio.Task] = None
        self._initialized = False  # Track initialization status

        # logger.info(f"[{self.stream_name}] NormalChat 实例 __init__ 完成 (同步部分)。")
        # Avoid logging here as stream_name might not be final

    async def initialize(self):
        """异步初始化,获取聊天类型和目标信息。"""

@@ -148,6 +148,17 @@ class FocusChatConfig(ConfigBase):
    compress_length_limit: int = 5
    """最多压缩份数,超过该数值的压缩上下文会被删除"""

    think_interval: int = 1
    """思考间隔(秒)"""

    self_identify_processor: bool = True
    """是否启用自我识别处理器"""

    tool_use_processor: bool = True
    """是否启用工具使用处理器"""

    working_memory_processor: bool = True
    """是否启用工作记忆处理器"""


@dataclass
class ExpressionConfig(ConfigBase):

@@ -1,40 +0,0 @@
from src.tools.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger
from typing import Any

logger = get_module_logger("get_mid_memory_tool")


class GetMidMemoryTool(BaseTool):
    """从记忆系统中获取相关记忆的工具"""

    name = "mid_chat_mem"
    description = "之前的聊天内容概述id中获取具体信息,如果没有聊天内容概述id,就不要使用"
    parameters = {
        "type": "object",
        "properties": {
            "id": {"type": "integer", "description": "要查询的聊天记录概述id"},
        },
        "required": ["id"],
    }

    async def execute(self, function_args: dict[str, Any], message_txt: str = "") -> dict[str, Any]:
        """执行记忆获取

        Args:
            function_args: 工具参数
            message_txt: 原始消息文本

        Returns:
            dict: 工具执行结果
        """
        try:
            id = function_args.get("id")
            return {"name": "mid_chat_mem", "content": str(id)}
        except Exception as e:
            logger.error(f"聊天记录获取工具执行失败: {str(e)}")
            return {"name": "mid_chat_mem", "content": f"聊天记录获取失败: {str(e)}"}


# 注册工具
# register_tool(GetMemoryTool)

@@ -1,25 +0,0 @@
from src.tools.tool_can_use.base_tool import BaseTool
from src.common.logger import get_module_logger

from typing import Any

logger = get_module_logger("send_emoji_tool")


class SendEmojiTool(BaseTool):
    """发送表情包的工具"""

    name = "send_emoji"
    description = "当你觉得需要表达情感,或者帮助表达,可以使用这个工具发送表情包"
    parameters = {
        "type": "object",
        "properties": {"text": {"type": "string", "description": "要发送的表情包描述"}},
        "required": ["text"],
    }

    async def execute(self, function_args: dict[str, Any], message_txt: str = "") -> dict[str, Any]:
        text = function_args.get("text", message_txt)
        return {
            "name": "send_emoji",
            "content": text,
        }

@@ -1,39 +0,0 @@
from src.tools.tool_can_use.base_tool import BaseTool
from src.common.logger_manager import get_logger
from typing import Dict, Any
from datetime import datetime
import time

logger = get_logger("get_time_date")


class GetCurrentDateTimeTool(BaseTool):
    """获取当前时间、日期、年份和星期的工具"""

    name = "get_current_date_time"
    description = "当有人询问或者涉及到具体时间或者日期的时候,必须使用这个工具"
    parameters = {
        "type": "object",
        "properties": {},
        "required": [],
    }

    async def execute(self, function_args: Dict[str, Any]) -> Dict[str, Any]:
        """执行获取当前时间、日期、年份和星期

        Args:
            function_args: 工具参数(此工具不使用)

        Returns:
            Dict: 工具执行结果
        """
        current_time = datetime.now().strftime("%H:%M:%S")
        current_date = datetime.now().strftime("%Y-%m-%d")
        current_year = datetime.now().strftime("%Y")
        current_weekday = datetime.now().strftime("%A")

        return {
            "type": "time_info",
            "id": f"time_info_{time.time()}",
            "content": f"当前时间: {current_time}, 日期: {current_date}, 年份: {current_year}, 星期: {current_weekday}",
        }

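The three deleted files above all follow the same tool contract: a BaseTool subclass with class-level name, description, and JSON-schema parameters, plus an async execute that returns a dict with "name" and "content" keys. As a reference for that shape only, here is a minimal hypothetical tool (EchoTool does not exist in the repository):

from typing import Any

from src.tools.tool_can_use.base_tool import BaseTool  # same base class used by the deleted tools


class EchoTool(BaseTool):
    """Hypothetical example tool: returns the provided text unchanged."""

    name = "echo"
    description = "Example only: echo the given text back to the caller"
    parameters = {
        "type": "object",
        "properties": {"text": {"type": "string", "description": "text to echo"}},
        "required": ["text"],
    }

    async def execute(self, function_args: dict[str, Any], message_txt: str = "") -> dict[str, Any]:
        # Fall back to the raw message text when no argument is given,
        # mirroring how SendEmojiTool handles its "text" parameter.
        text = function_args.get("text", message_txt)
        return {"name": self.name, "content": text}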
@@ -3,56 +3,14 @@ from src.config.config import global_config
import json
from src.common.logger_manager import get_logger
from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance
import traceback
from src.chat.person_info.relationship_manager import relationship_manager
from src.chat.utils.utils import parse_text_timestamps
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation

logger = get_logger("tool_use")


class ToolUser:
    def __init__(self):
        self.llm_model_tool = LLMRequest(
            model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
        )

    @staticmethod
    async def _build_tool_prompt(
        message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None
    ):
        """构建工具使用的提示词

        Args:
            message_txt: 用户消息文本
            subheartflow: 子心流对象

        Returns:
            str: 构建好的提示词
        """

        if observation:
            mid_memory_info = observation.mid_memory_info
            # print(f"intol111111111111111111111111111111111222222222222mid_memory_info:{mid_memory_info}")

        # 这些信息应该从调用者传入,而不是从self获取
        bot_name = global_config.bot.nickname
        prompt = ""
        prompt += mid_memory_info
        prompt += "你正在思考如何回复群里的消息。\n"
        prompt += "之前群里进行了如下讨论:\n"
        prompt += message_txt
        # prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
        prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n"
        # prompt += "必须调用 'lpmm_get_knowledge' 工具来获取知识。\n"
        prompt += "你现在需要对群里的聊天内容进行回复,请你思考应该使用什么工具,然后选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"

        prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
        prompt = parse_text_timestamps(prompt, mode="lite")

        return prompt

    @staticmethod
    def _define_tools():
        """获取所有已注册工具的定义

@@ -100,93 +58,3 @@ class ToolUser:
        except Exception as e:
            logger.error(f"执行工具调用时发生错误: {str(e)}")
            return None

    async def use_tool(self, message_txt: str, chat_stream: ChatStream = None, observation: ChattingObservation = None):
        """使用工具辅助思考,判断是否需要额外信息

        Args:
            message_txt: 用户消息文本
            chat_stream: 聊天流对象
            observation: 观察对象(可选)

        Returns:
            dict: 工具使用结果,包含结构化的信息
        """
        try:
            # 构建提示词
            prompt = await self._build_tool_prompt(
                message_txt=message_txt,
                chat_stream=chat_stream,
                observation=observation,
            )

            # 定义可用工具
            tools = self._define_tools()
            logger.trace(f"工具定义: {tools}")

            # 使用llm_model_tool发送带工具定义的请求
            payload = {
                "model": self.llm_model_tool.model_name,
                "messages": [{"role": "user", "content": prompt}],
                "tools": tools,
                "temperature": 0.2,
            }

            logger.trace(f"发送工具调用请求,模型: {self.llm_model_tool.model_name}")
            # 发送请求获取模型是否需要调用工具
            response = await self.llm_model_tool._execute_request(
                endpoint="/chat/completions", payload=payload, prompt=prompt
            )

            # 根据返回值数量判断是否有工具调用
            if len(response) == 3:
                content, reasoning_content, tool_calls = response
                # logger.info(f"工具思考: {tool_calls}")
                # logger.debug(f"工具思考: {content}")

                # 检查响应中工具调用是否有效
                if not tool_calls:
                    logger.debug("模型返回了空的tool_calls列表")
                    return {"used_tools": False}

                tool_calls_str = ""
                for tool_call in tool_calls:
                    tool_calls_str += f"{tool_call['function']['name']}\n"
                logger.info(
                    f"根据:\n{prompt}\n\n内容:{content}\n\n模型请求调用{len(tool_calls)}个工具: {tool_calls_str}"
                )
                tool_results = []
                structured_info = {}  # 动态生成键

                # 执行所有工具调用
                for tool_call in tool_calls:
                    result = await self._execute_tool_call(tool_call)
                    if result:
                        tool_results.append(result)
                        # 使用工具名称作为键
                        tool_name = result["name"]
                        if tool_name not in structured_info:
                            structured_info[tool_name] = []
                        structured_info[tool_name].append({"name": result["name"], "content": result["content"]})

                # 如果有工具结果,返回结构化的信息
                if structured_info:
                    logger.debug(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
                    return {"used_tools": True, "structured_info": structured_info}
            else:
                # 没有工具调用
                content, reasoning_content = response
                logger.debug("模型没有请求调用任何工具")

            # 如果没有工具调用或处理失败,直接返回原始思考
            return {
                "used_tools": False,
            }

        except Exception as e:
            logger.error(f"工具调用过程中出错: {str(e)}")
            logger.error(f"工具调用过程中出错: {traceback.format_exc()}")
            return {
                "used_tools": False,
                "error": str(e),
            }

@@ -85,10 +85,19 @@ reply_trigger_threshold = 3.0 # 专注聊天触发阈值,越低越容易进入
default_decay_rate_per_second = 0.98 # 默认衰减率,越大衰减越快,越高越难进入专注聊天
consecutive_no_reply_threshold = 3 # 连续不回复的阈值,越低越容易结束专注聊天

think_interval = 1 # 思考间隔 单位秒

observation_context_size = 15 # 观察到的最长上下文大小,建议15,太短太长都会导致脑袋尖尖
compressed_length = 5 # 不能大于chat.observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
compress_length_limit = 5 # 最多压缩份数,超过该数值的压缩上下文会被删除

[focus_chat.processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗
self_identify_processor = true # 是否启用自我识别处理器
tool_use_processor = true # 是否启用工具使用处理器
working_memory_processor = true # 是否启用工作记忆处理器


[expression]
# 表达方式
expression_style = "描述麦麦说话的表达风格,表达习惯"
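As a usage illustration only (values are arbitrary, and the parent table name [focus_chat] is inferred from the [focus_chat.processor] sub-table shown above), a deployment that wants slower thinking and a lower token bill could override the new keys like this:

[focus_chat]
think_interval = 3                 # think every 3 seconds instead of every second
[focus_chat.processor]
self_identify_processor = true
tool_use_processor = true
working_memory_processor = false   # skip working memory to save tokens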