feat: split and rename the model configs; fix the action restoration bug
@@ -78,10 +78,10 @@ class DefaultExpressor:
         self.log_prefix = "expressor"
         # TODO: API-Adapter modification marker
         self.express_model = LLMRequest(
-            model=global_config.model.normal,
-            temperature=global_config.model.normal["temp"],
+            model=global_config.model.focus_expressor,
+            temperature=global_config.model.focus_expressor["temp"],
             max_tokens=256,
-            request_type="response_heartflow",
+            request_type="focus_expressor",
         )
         self.heart_fc_sender = HeartFCSender()

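This hunk is the template for most of the commit: each LLMRequest call site moves from a generic model entry (`normal`, `sub_heartflow`, `plan`, `tool_use`, ...) to a purpose-named `focus_*` entry, and the `request_type` label is renamed to match. A reduced, self-contained sketch of the pattern; `LLMRequestSketch` is a stand-in for the project's `LLMRequest`, and the config values are placeholders:

```python
from types import SimpleNamespace

# Stand-in for global_config.model after the rename; real values come from bot_config.toml.
model_cfg = SimpleNamespace(focus_expressor={"name": "Pro/deepseek-ai/DeepSeek-V3", "temp": 0.3})


class LLMRequestSketch:
    """Stand-in for the project's LLMRequest, keeping only the fields this diff touches."""

    def __init__(self, model: dict, temperature: float, max_tokens: int, request_type: str):
        self.model_name = model["name"]
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.request_type = request_type


# Mirrors the new DefaultExpressor wiring shown above.
express_model = LLMRequestSketch(
    model=model_cfg.focus_expressor,
    temperature=model_cfg.focus_expressor["temp"],
    max_tokens=256,
    request_type="focus_expressor",
)
```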
@@ -27,9 +27,6 @@ class ActionProcessor(BaseProcessor):
         """Initialize the observation processor"""
         super().__init__()
         # TODO: API-Adapter modification marker
-        self.model_summary = LLMRequest(
-            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
-        )

     async def process_info(
         self,
@@ -71,10 +71,10 @@ class MindProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id

         self.llm_model = LLMRequest(
-            model=global_config.model.sub_heartflow,
-            temperature=global_config.model.sub_heartflow["temp"],
+            model=global_config.model.focus_chat_mind,
+            temperature=global_config.model.focus_chat_mind["temp"],
             max_tokens=800,
-            request_type="sub_heart_flow",
+            request_type="focus_chat_mind",
         )

         self.current_mind = ""
@@ -54,10 +54,10 @@ class SelfProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id

         self.llm_model = LLMRequest(
-            model=global_config.model.sub_heartflow,
-            temperature=global_config.model.sub_heartflow["temp"],
+            model=global_config.model.focus_self_recognize,
+            temperature=global_config.model.focus_self_recognize["temp"],
             max_tokens=800,
-            request_type="self_identify",
+            request_type="focus_self_identify",
         )

         name = chat_manager.get_stream_name(self.subheartflow_id)
@@ -49,9 +49,9 @@ class ToolProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id
         self.log_prefix = f"[{subheartflow_id}:ToolExecutor] "
         self.llm_model = LLMRequest(
-            model=global_config.model.tool_use,
+            model=global_config.model.focus_tool_use,
             max_tokens=500,
-            request_type="tool_execution",
+            request_type="focus_tool",
         )
         self.structured_info = []

@@ -61,10 +61,10 @@ class WorkingMemoryProcessor(BaseProcessor):
         self.subheartflow_id = subheartflow_id

         self.llm_model = LLMRequest(
-            model=global_config.model.sub_heartflow,
-            temperature=global_config.model.sub_heartflow["temp"],
+            model=global_config.model.focus_chat_mind,
+            temperature=global_config.model.focus_chat_mind["temp"],
             max_tokens=800,
-            request_type="working_memory",
+            request_type="focus_working_memory",
         )

         name = chat_manager.get_stream_name(self.subheartflow_id)
@@ -36,7 +36,7 @@ class MemoryActivator:
     def __init__(self):
         # TODO: API-Adapter modification marker
         self.summary_model = LLMRequest(
-            model=global_config.model.summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
+            model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
         )
         self.running_memory = []

@@ -28,8 +28,7 @@ class ActionManager:
         self._registered_actions: Dict[str, ActionInfo] = {}
         # The action set currently in use; the default actions are loaded by default
         self._using_actions: Dict[str, ActionInfo] = {}
-        # Temporary backup of the original in-use actions
-        self._original_actions_backup: Optional[Dict[str, ActionInfo]] = None

         # Default action set, kept only as a snapshot for restoring defaults
         self._default_actions: Dict[str, ActionInfo] = {}
@@ -278,22 +277,18 @@ class ActionManager:
         return True

     def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None:
-        """Temporarily remove the given actions from the in-use set, backing up the original set"""
-        if self._original_actions_backup is None:
-            self._original_actions_backup = self._using_actions.copy()
+        """Temporarily remove the given actions from the in-use set"""
         for name in actions_to_remove:
             self._using_actions.pop(name, None)

     def restore_actions(self) -> None:
-        """Restore the previously backed-up original in-use set"""
-        if self._original_actions_backup is not None:
-            self._using_actions = self._original_actions_backup.copy()
-            self._original_actions_backup = None
+        """Restore to the default action set"""
+        logger.debug(f"Restoring action set: from {list(self._using_actions.keys())} to the default set {list(self._default_actions.keys())}")
+        self._using_actions = self._default_actions.copy()

     def restore_default_actions(self) -> None:
         """Restore the default action set into the in-use set"""
         self._using_actions = self._default_actions.copy()
-        self._original_actions_backup = None

     def get_action(self, action_name: str) -> Optional[Type[BaseAction]]:
         """
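The `_original_actions_backup` mechanism is dropped entirely: `restore_actions()` previously reset to a backup taken on the first `temporarily_remove_actions()` call (and was a no-op when no backup existed); it now always resets to the default snapshot. A minimal sketch of the new semantics, using the field names from the diff:

```python
from typing import Dict, List


class ActionManagerSketch:
    """Minimal model of the post-fix restore behavior."""

    def __init__(self, defaults: Dict[str, object]):
        self._default_actions = dict(defaults)  # snapshot used for restoring
        self._using_actions = dict(defaults)    # set currently in use

    def temporarily_remove_actions(self, actions_to_remove: List[str]) -> None:
        # No backup is taken any more; the removal is applied directly.
        for name in actions_to_remove:
            self._using_actions.pop(name, None)

    def restore_actions(self) -> None:
        # Always reset to the default snapshot, no matter how many
        # temporary removals happened in between.
        self._using_actions = self._default_actions.copy()


mgr = ActionManagerSketch({"reply": object(), "no_reply": object()})
mgr.temporarily_remove_actions(["reply"])
mgr.restore_actions()
assert set(mgr._using_actions) == {"reply", "no_reply"}
```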
@@ -78,9 +78,9 @@ class ActionPlanner:
         self.log_prefix = log_prefix
         # LLM planner configuration
         self.planner_llm = LLMRequest(
-            model=global_config.model.plan,
+            model=global_config.model.focus_planner,
             max_tokens=1000,
-            request_type="action_planning",  # used for action planning
+            request_type="focus_planner",  # used for action planning
         )

         self.action_manager = action_manager
@@ -161,6 +161,10 @@ class ActionPlanner:
             action = "no_reply"
             reasoning = "No actions available" if not current_available_actions else "Only the no_reply action is available, skipping planning"
             logger.info(f"{self.log_prefix}{reasoning}")
+            self.action_manager.restore_actions()
+            logger.debug(
+                f"{self.log_prefix}Restored the default action set, currently available: {list(self.action_manager.get_using_actions().keys())}"
+            )
             return {
                 "action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
                 "current_mind": current_mind,
@@ -241,10 +245,10 @@ class ActionPlanner:
             f"{self.log_prefix}Planner prompt:\n{prompt}\n\nDecided action: {action},\nAction data: '{action_data}'\nReasoning: {reasoning}"
         )

-        # Restore the original action set
+        # Restore to the default action set
         self.action_manager.restore_actions()
         logger.debug(
-            f"{self.log_prefix}Restored the original action set, currently available: {list(self.action_manager.get_using_actions().keys())}"
+            f"{self.log_prefix}Restored to the default action set, currently available: {list(self.action_manager.get_using_actions().keys())}"
         )

         action_result = {"action_type": action, "action_data": action_data, "reasoning": reasoning}
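These two planner hunks are the "action restoration" fix from the commit title: the early no-reply return previously skipped `restore_actions()`, so anything removed with `temporarily_remove_actions()` stayed missing on later planning cycles. A compressed, self-contained sketch of the fixed control flow; `run_llm_planning` is a hypothetical stand-in for the elided planning logic:

```python
async def run_llm_planning(available: dict) -> tuple:
    """Hypothetical stand-in for the elided LLM planning step."""
    return "reply", {}, "planned by LLM"


async def plan_sketch(action_manager, current_available_actions: dict) -> dict:
    if not current_available_actions or set(current_available_actions) == {"no_reply"}:
        # The fix: restore the default action set on the early-return path too,
        # otherwise temporarily removed actions stayed missing on later cycles.
        action_manager.restore_actions()
        return {"action_result": {"action_type": "no_reply"}}

    action, action_data, reasoning = await run_llm_planning(current_available_actions)
    action_manager.restore_actions()  # the normal path already restored before returning
    return {"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning}}
```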
@@ -33,7 +33,7 @@ class MemoryManager:
         self._id_map: Dict[str, MemoryItem] = {}

         self.llm_summarizer = LLMRequest(
-            model=global_config.model.summary, temperature=0.3, max_tokens=512, request_type="memory_summarization"
+            model=global_config.model.focus_working_memory, temperature=0.3, max_tokens=512, request_type="memory_summarization"
         )

     @property
@@ -88,34 +88,34 @@ class BackgroundTaskManager:
                 f"Chat state update task started, interval: {STATE_UPDATE_INTERVAL_SECONDS}s",
                 "_state_update_task",
             ),
-            (
-                self._run_cleanup_cycle,
-                "info",
-                f"Cleanup task started, interval: {CLEANUP_INTERVAL_SECONDS}s",
-                "_cleanup_task",
-            ),
-            # New: private chat activation task config
-            (
-                # Use lambda to pass the interval to the runner function
-                lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
-                "debug",
-                f"Private chat activation check task started, interval: {PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
-                "_private_chat_activation_task",
-            ),
         ]

-        # Conditionally add the focus evaluation task based on chat_mode
+        # Conditionally add the other tasks based on chat_mode
         if not (global_config.chat.chat_mode == "normal"):
-            task_configs.append(
+            task_configs.extend([
+                (
+                    self._run_cleanup_cycle,
+                    "info",
+                    f"Cleanup task started, interval: {CLEANUP_INTERVAL_SECONDS}s",
+                    "_cleanup_task",
+                ),
+                # New: private chat activation task config
+                (
+                    # Use lambda to pass the interval to the runner function
+                    lambda: self._run_private_chat_activation_cycle(PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS),
+                    "debug",
+                    f"Private chat activation check task started, interval: {PRIVATE_CHAT_ACTIVATION_CHECK_INTERVAL_SECONDS}s",
+                    "_private_chat_activation_task",
+                ),
                 (
                     self._run_into_focus_cycle,
                     "debug",  # set to debug to avoid excessive logging
                     f"Focus evaluation task started, interval: {INTEREST_EVAL_INTERVAL_SECONDS}s",
                     "_into_focus_task",
                 )
-            )
+            ])
         else:
-            logger.info("Chat mode is normal, skipping the focus evaluation task")
+            logger.info("Chat mode is normal, skipping the cleanup, private chat activation, and focus evaluation tasks")

         # Start all tasks uniformly
         for task_func, log_level, log_msg, task_attr_name in task_configs:
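The restructuring moves the cleanup and private-chat-activation entries from the unconditional `task_configs` list into the `chat_mode != "normal"` branch, so normal mode now starts only the state-update task. A reduced sketch of the registration pattern (runner functions and messages are placeholders):

```python
async def state_update_runner(): ...
async def cleanup_runner(): ...
async def focus_eval_runner(): ...


def build_task_configs(chat_mode: str) -> list:
    # Always-on tasks.
    task_configs = [
        (state_update_runner, "info", "chat state update task started", "_state_update_task"),
    ]
    if chat_mode != "normal":
        # After this commit, cleanup, private-chat activation, and focus
        # evaluation only run outside normal mode.
        task_configs.extend([
            (cleanup_runner, "info", "cleanup task started", "_cleanup_task"),
            (focus_eval_runner, "debug", "focus evaluation task started", "_into_focus_task"),
        ])
    return task_configs


assert len(build_task_configs("normal")) == 1
assert len(build_task_configs("focus")) == 3
```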
@@ -66,10 +66,6 @@ class ChattingObservation(Observation):
         self.oldest_messages = []
         self.oldest_messages_str = ""
         self.compressor_prompt = ""
-        # TODO: API-Adapter modification marker
-        self.model_summary = LLMRequest(
-            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
-        )

     async def initialize(self):
         self.is_group_chat, self.chat_target_info = await get_chat_type_and_target_info(self.chat_id)
@@ -193,7 +193,6 @@ class MemoryGraph:
 class Hippocampus:
     def __init__(self):
         self.memory_graph = MemoryGraph()
-        self.llm_topic_judge = None
         self.model_summary = None
         self.entorhinal_cortex = None
         self.parahippocampal_gyrus = None
@@ -205,8 +204,7 @@ class Hippocampus:
         # Load the memory graph from the database
         self.entorhinal_cortex.sync_memory_from_db()
         # TODO: API-Adapter modification marker
-        self.llm_topic_judge = LLMRequest(global_config.model.topic_judge, request_type="memory")
-        self.model_summary = LLMRequest(global_config.model.summary, request_type="memory")
+        self.model_summary = LLMRequest(global_config.model.memory_summary, request_type="memory")

     def get_all_node_names(self) -> list:
         """Return the names of all nodes in the memory graph"""
@@ -344,7 +342,7 @@ class Hippocampus:
         # Use the LLM to extract keywords
         topic_num = min(5, max(1, int(len(text) * 0.1)))  # adjust keyword count dynamically by text length
         # logger.info(f"Number of keywords to extract: {topic_num}")
-        topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
+        topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))

         # Extract the keywords
         keywords = re.findall(r"<([^>]+)>", topics_response[0])
@@ -699,7 +697,7 @@ class Hippocampus:
         # Use the LLM to extract keywords
         topic_num = min(5, max(1, int(len(text) * 0.1)))  # adjust keyword count dynamically by text length
         # logger.info(f"Number of keywords to extract: {topic_num}")
-        topics_response = await self.llm_topic_judge.generate_response(self.find_topic_llm(text, topic_num))
+        topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))

         # Extract the keywords
         keywords = re.findall(r"<([^>]+)>", topics_response[0])
@@ -1126,7 +1124,7 @@ class ParahippocampalGyrus:

         # 2. Use the LLM to extract key topics
         topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate)
-        topics_response = await self.hippocampus.llm_topic_judge.generate_response(
+        topics_response = await self.hippocampus.model_summary.generate_response(
             self.hippocampus.find_topic_llm(input_text, topic_num)
         )

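Across the memory hunks, the separate `topic_judge` and `summary` models collapse into one `memory_summary` model: `Hippocampus` drops `llm_topic_judge` and routes keyword extraction through the single `model_summary` request object. A reduced sketch of the consolidated call path, assuming the indexed response tuple implied by `topics_response[0]`; the prompt builder is a placeholder:

```python
import re


class HippocampusSketch:
    def __init__(self, model_summary):
        # One request object now serves both topic extraction and summarization.
        self.model_summary = model_summary

    async def extract_keywords(self, text: str) -> list:
        # Keyword count scales with text length, capped at 5 (as in the diff).
        topic_num = min(5, max(1, int(len(text) * 0.1)))
        prompt = self.find_topic_llm(text, topic_num)
        topics_response = await self.model_summary.generate_response(prompt)
        # The model is expected to wrap each keyword in angle brackets.
        return re.findall(r"<([^>]+)>", topics_response[0])

    def find_topic_llm(self, text: str, topic_num: int) -> str:
        # Placeholder prompt builder; the real one lives elsewhere in the project.
        return f"Extract up to {topic_num} topics from: {text}"
```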
@@ -17,7 +17,7 @@ class NormalChatGenerator:
     def __init__(self):
         # TODO: API-Adapter modification marker
         self.model_reasoning = LLMRequest(
-            model=global_config.model.reasoning,
+            model=global_config.model.normal_chat_1,
             temperature=0.7,
             max_tokens=3000,
             request_type="response_reasoning",
@@ -30,7 +30,7 @@ class NormalChatGenerator:
         )

         self.model_sum = LLMRequest(
-            model=global_config.model.summary, temperature=0.7, max_tokens=3000, request_type="relation"
+            model=global_config.model.memory_summary, temperature=0.7, max_tokens=3000, request_type="relation"
         )
         self.current_model_type = "r1"  # use R1 by default
         self.current_model_name = "unknown model"
@@ -130,6 +130,7 @@ class ImageManager:
             # Decide whether to save the image based on config
             if global_config.emoji.save_emoji:
                 # Generate the filename and path
+                logger.debug(f"Saving emoji: {image_hash}")
                 current_timestamp = time.time()
                 filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
                 emoji_dir = os.path.join(self.IMAGE_DIR, "emoji")
@@ -156,7 +157,7 @@ class ImageManager:
                     description=description,
                     timestamp=current_timestamp,
                 )
-                logger.trace(f"Saved emoji metadata: {file_path}")
+                # logger.debug(f"Saved emoji metadata: {file_path}")
             except Exception as e:
                 logger.error(f"Failed to save the emoji file or metadata: {str(e)}")

@@ -178,10 +178,10 @@ class EmojiConfig(ConfigBase):
     check_interval: int = 120
     """Emoji check interval (minutes)"""

-    save_pic: bool = False
+    save_pic: bool = True
     """Whether to save images"""

-    save_emoji: bool = False
+    save_emoji: bool = True
     """Whether to save emojis"""

     cache_emoji: bool = True
@@ -384,26 +384,32 @@ class ModelConfig(ConfigBase):
     normal: dict[str, Any] = field(default_factory=lambda: {})
     """Normal model config"""

-    topic_judge: dict[str, Any] = field(default_factory=lambda: {})
-    """Topic judgment model config"""
+    memory_summary: dict[str, Any] = field(default_factory=lambda: {})
+    """Memory summarization model config"""

-    summary: dict[str, Any] = field(default_factory=lambda: {})
-    """Summary model config"""
-
     vlm: dict[str, Any] = field(default_factory=lambda: {})
     """Vision-language model config"""

-    heartflow: dict[str, Any] = field(default_factory=lambda: {})
-    """Heartflow model config"""
-
     observation: dict[str, Any] = field(default_factory=lambda: {})
     """Observation model config"""

-    sub_heartflow: dict[str, Any] = field(default_factory=lambda: {})
-    """Sub-heartflow model config"""
+    focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus working-memory model config"""

-    plan: dict[str, Any] = field(default_factory=lambda: {})
-    """Planning model config"""
+    focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus chat planning model config"""
+
+    focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus self-recognition model config"""
+
+    focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus tool-use model config"""
+
+    focus_planner: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus planner model config"""
+
+    focus_expressor: dict[str, Any] = field(default_factory=lambda: {})
+    """Focus expressor model config"""

     embedding: dict[str, Any] = field(default_factory=lambda: {})
     """Embedding model config"""
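Summarizing the field changes: `topic_judge` and `summary` merge into `memory_summary`; `sub_heartflow` fans out into `focus_chat_mind`, `focus_working_memory`, and `focus_self_recognize` depending on the call site; `plan` becomes `focus_planner`; `heartflow` is dropped; `focus_tool_use` and `focus_expressor` replace and extend `tool_use`. A sketch of a migration helper built from that mapping (the helper is illustrative, not part of the commit):

```python
# Old [model.*] key -> new key, as renamed in this commit. Note sub_heartflow has
# no single successor: its call sites split across three new focus_* entries.
MODEL_KEY_RENAMES = {
    "topic_judge": "memory_summary",
    "summary": "memory_summary",
    "plan": "focus_planner",
    "tool_use": "focus_tool_use",
    "reasoning": "normal_chat_1",
}


def migrate_model_table(old_table: dict) -> dict:
    """Illustrative helper: rewrite old model keys to the new names, keeping the rest."""
    return {MODEL_KEY_RENAMES.get(key, key): value for key, value in old_table.items()}


old = {"plan": {"name": "Pro/deepseek-ai/DeepSeek-V3"}, "vlm": {"name": "..."}}
assert "focus_planner" in migrate_model_table(old)
```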
@@ -417,5 +423,6 @@ class ModelConfig(ConfigBase):
     pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
     """PFC reply-checker model config"""

-    tool_use: dict[str, Any] = field(default_factory=lambda: {})
-    """Tool-use model config"""

@@ -459,6 +459,7 @@ class LLMRequest:
                 logger.error(
                     f"Model {self.model_name} error code: {response.status} - {error_code_mapping.get(response.status)}"
                 )
+                print(request_content)
                 print(response)
                 # Try to fetch and log the detailed error info returned by the server
                 try:
@@ -499,8 +500,8 @@ class LLMRequest:
         if global_config.model.normal.get("name") == old_model_name:
             global_config.model.normal["name"] = self.model_name
             logger.warning(f"Temporarily downgrading the llm_normal model in the global config to {self.model_name}")
-        if global_config.model.reasoning.get("name") == old_model_name:
-            global_config.model.reasoning["name"] = self.model_name
+        if global_config.model.normal_chat_1.get("name") == old_model_name:
+            global_config.model.normal_chat_1["name"] = self.model_name
             logger.warning(f"Temporarily downgrading the llm_reasoning model in the global config to {self.model_name}")

         if payload and "model" in payload:
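The fallback path in `LLMRequest` rewrites whichever global model entry still names the failed model; only the key lookup changes here (`reasoning` becomes `normal_chat_1`), while the log text still says `llm_reasoning`. A compressed sketch of that downgrade step:

```python
def downgrade_matching_models(model_cfg, old_model_name: str, new_model_name: str) -> list:
    """Compressed sketch of the fallback downgrade in LLMRequest (keys from the diff)."""
    downgraded = []
    for key in ("normal", "normal_chat_1"):  # "normal_chat_1" was "reasoning" before this commit
        entry = getattr(model_cfg, key)
        if entry.get("name") == old_model_name:
            entry["name"] = new_model_name  # temporary downgrade for later requests
            downgraded.append(key)
    return downgraded
```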
@@ -1,18 +1,9 @@
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
 import json
 from src.common.logger_manager import get_logger
 from src.tools.tool_can_use import get_all_tool_definitions, get_tool_instance

 logger = get_logger("tool_use")


 class ToolUser:
-    def __init__(self):
-        self.llm_model_tool = LLMRequest(
-            model=global_config.model.tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
-        )
-
     @staticmethod
     def _define_tools():
         """Get the definitions of all registered tools
@@ -196,7 +196,7 @@ pfc_chatting = false # whether to enable PFC chat; this only affects private chats, and
 model_max_output_length = 800 # maximum number of tokens the model may return per call

 # This model must be a reasoning model
-[model.reasoning] # reasoning reply model for normal chat mode
+[model.normal_chat_1] # primary reply model for normal chat mode; a reasoning model is recommended
 name = "Pro/deepseek-ai/DeepSeek-R1"
 provider = "SILICONFLOW"
 pri_in = 1.0 # model input price (optional, to track spend)
@@ -210,13 +210,7 @@ pri_out = 8 # model output price (optional, to track spend)
 # default temp 0.2; if you use the old V3 or another model, adjust temp yourself
 temp = 0.2 # model temperature; 0.1-0.3 recommended for the new V3

-[model.topic_judge] # topic judgment model: qwen2.5 7b recommended
-name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 0.35
-pri_out = 0.35
-
-[model.summary] # summarization model; qwen2.5 32b or above recommended
+[model.memory_summary] # memory summarization model; qwen2.5 32b or above recommended
 name = "Qwen/Qwen2.5-32B-Instruct"
 provider = "SILICONFLOW"
 pri_in = 1.26
@@ -228,12 +222,6 @@ provider = "SILICONFLOW"
 pri_in = 0.35
 pri_out = 0.35

-[model.heartflow] # model that decides whether 麦麦 joins a chat
-name = "Qwen/Qwen2.5-32B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
-
 [model.observation] # observation model; compresses chat content, a free one is recommended
 # name = "Pro/Qwen/Qwen2.5-7B-Instruct"
 name = "Qwen/Qwen2.5-7B-Instruct"
@@ -241,19 +229,48 @@ provider = "SILICONFLOW"
 pri_in = 0
 pri_out = 0

-[model.sub_heartflow] # heartflow: generates 麦麦's inner thoughts during focused chat; must be a model with tool-calling ability
+[model.focus_working_memory] # working-memory model; qwen2.5 32b recommended
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.focus_chat_mind] # chat planning: generates 麦麦's planning thoughts about the chat during focused chat
 name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
 pri_in = 2
 pri_out = 8
 temp = 0.3 # model temperature; 0.1-0.3 recommended for the new V3

-[model.plan] # decision-making: decides what 麦麦 should do during focused chat
+[model.focus_tool_use] # tool-calling model; must support tool calls, qwen2.5 32b recommended
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.focus_planner] # decision-making: decides what 麦麦 should do during focused chat
 name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
 pri_in = 2
 pri_out = 8

+# Expressor model, used to generate expression styles
+[model.focus_expressor]
+name = "Pro/deepseek-ai/DeepSeek-V3"
+provider = "SILICONFLOW"
+pri_in = 2
+pri_out = 8
+temp = 0.3
+
+# Self-recognition model, for self-awareness and identity recognition
+[model.focus_self_recognize]
+name = "Pro/deepseek-ai/DeepSeek-V3"
+provider = "SILICONFLOW"
+pri_in = 2
+pri_out = 8
+temp = 0.3
+
 # Embedding model

 [model.embedding] # embedding
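Each `[model.*]` table above feeds an `LLMRequest` as shown in the code hunks; a minimal sketch of reading one of the new tables directly (the `tomllib` loading is illustrative; the project routes these tables through `global_config.model.*`):

```python
import tomllib  # Python 3.11+

with open("bot_config.toml", "rb") as f:
    config = tomllib.load(f)

focus_expressor = config["model"]["focus_expressor"]
# The table becomes the model dict, and its "temp" key becomes the request
# temperature, mirroring DefaultExpressor in the first hunk of this commit.
print(focus_expressor["name"], focus_expressor.get("temp"))
```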
@@ -263,6 +280,9 @@ pri_in = 0
 pri_out = 0

+
+
+
 # Private-chat PFC: requires the PFC feature to be enabled. All three models default to SiliconFlow v3; to support several simultaneous private chats or frequent calls, swap one or two of them for the official v3 or other models to avoid hitting 429s.

 # PFC decision model
@@ -289,15 +309,6 @@ pri_in = 2
 pri_out = 8


-# The following models are temporarily unused!!
-# The following models are temporarily unused!!
-# The following models are temporarily unused!!
-# The following models are temporarily unused!!
-# The following models are temporarily unused!!
-
-[model.tool_use] # tool-calling model; must support tool calls, qwen2.5 32b recommended
-name = "Qwen/Qwen2.5-32B-Instruct"
-provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
