config: update the configuration; tool use can now be toggled on, and focus chat also supports the secondary replyer model
@@ -17,7 +17,6 @@ from src.chat.focus_chat.info_processors.working_memory_processor import Working
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from src.chat.heart_flow.observation.working_observation import WorkingMemoryObservation
 from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
-from src.chat.heart_flow.observation.structure_observation import StructureObservation
 from src.chat.heart_flow.observation.actions_observation import ActionObservation

 from src.chat.focus_chat.memory_activator import MemoryActivator
@@ -28,7 +27,6 @@ from src.chat.focus_chat.planners.action_manager import ActionManager
 from src.config.config import global_config
 from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
 from src.chat.focus_chat.hfc_version_manager import get_hfc_version
-from src.chat.focus_chat.info.structured_info import StructuredInfo
 from src.person_info.relationship_builder_manager import relationship_builder_manager


@@ -41,7 +39,6 @@ OBSERVATION_CLASSES = {
     "ChattingObservation": (ChattingObservation, "chat_id"),
     "WorkingMemoryObservation": (WorkingMemoryObservation, "observe_id"),
     "HFCloopObservation": (HFCloopObservation, "observe_id"),
-    "StructureObservation": (StructureObservation, "observe_id"),
 }

 # Define the processor mapping: keys are processor names, values are (processor class, optional config key name)
@@ -1,42 +0,0 @@
-from datetime import datetime
-from src.common.logger import get_logger
-
-# Import the new utility function
-
-logger = get_logger("observation")
-
-
-# Base class for all observations
-class StructureObservation:
-    def __init__(self, observe_id):
-        self.observe_info = ""
-        self.observe_id = observe_id
-        self.last_observe_time = datetime.now().timestamp()  # initialize to the current time
-        self.history_loop = []
-        self.structured_info = []
-
-    def to_dict(self) -> dict:
-        """Convert the observation object into a serializable dict"""
-        return {
-            "observe_info": self.observe_info,
-            "observe_id": self.observe_id,
-            "last_observe_time": self.last_observe_time,
-            "history_loop": self.history_loop,
-            "structured_info": self.structured_info,
-        }
-
-    def get_observe_info(self):
-        return self.structured_info
-
-    def add_structured_info(self, structured_info: dict):
-        self.structured_info.append(structured_info)
-
-    async def observe(self):
-        observed_structured_infos = []
-        for structured_info in self.structured_info:
-            if structured_info.get("ttl") > 0:
-                structured_info["ttl"] -= 1
-                observed_structured_infos.append(structured_info)
-                logger.debug(f"structured info is still alive: {structured_info}")
-
-        self.structured_info = observed_structured_infos
@@ -69,7 +69,6 @@ class NormalChat:

         # Initialize the Normal Chat dedicated expressor
         self.expressor = NormalChatExpressor(self.chat_stream)
-        self.replyer = DefaultReplyer(self.chat_stream)

         # Interest dict
         self.interest_dict = interest_dict
@@ -16,7 +16,7 @@ class NormalChatGenerator:
         model_config_1 = global_config.model.replyer_1.copy()
         model_config_2 = global_config.model.replyer_2.copy()

-        prob_first = global_config.normal_chat.normal_chat_first_probability
+        prob_first = global_config.chat.replyer_random_probability

         model_config_1["weight"] = prob_first
         model_config_2["weight"] = 1.0 - prob_first
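A note on the weighting scheme above: replyer_random_probability becomes the weight of the primary model, and its complement the weight of the secondary one. A minimal sketch, assuming dict-style model configs as in the diff (the helper name is hypothetical, not the project's code):

    # Sketch: derive weighted model configs from replyer_random_probability.
    from typing import Any, Dict, List

    def build_weighted_configs(
        replyer_1: Dict[str, Any],
        replyer_2: Dict[str, Any],
        prob_first: float,
    ) -> List[Dict[str, Any]]:
        primary, secondary = replyer_1.copy(), replyer_2.copy()
        primary["weight"] = prob_first          # chance of the primary replyer
        secondary["weight"] = 1.0 - prob_first  # complement for the secondary
        return [primary, secondary]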
@@ -42,15 +42,13 @@ class NormalChatGenerator:
         relation_info = await person_info_manager.get_value(person_id, "short_impression")
         reply_to_str = f"{person_name}:{message.processed_plain_text}"

-        structured_info = ""
-
         try:
             success, reply_set, prompt = await generator_api.generate_reply(
                 chat_stream=message.chat_stream,
                 reply_to=reply_to_str,
                 relation_info=relation_info,
-                structured_info=structured_info,
                 available_actions=available_actions,
+                enable_tool=global_config.tool.enable_in_normal_chat,
                 model_configs=self.model_configs,
                 request_type="normal.replyer",
                 return_prompt=True,
@@ -137,19 +137,28 @@ class DefaultReplyer:
     def __init__(
         self,
         chat_stream: ChatStream,
+        enable_tool: bool = False,
         model_configs: Optional[List[Dict[str, Any]]] = None,
         request_type: str = "focus.replyer",
     ):
         self.log_prefix = "replyer"
         self.request_type = request_type
+
+        self.enable_tool = enable_tool

         if model_configs:
             self.express_model_configs = model_configs
         else:
             # When no configs are provided, use the default configs with default weights
-            default_config = global_config.model.replyer_1.copy()
-            default_config.setdefault("weight", 1.0)
-            self.express_model_configs = [default_config]
+            model_config_1 = global_config.model.replyer_1.copy()
+            model_config_2 = global_config.model.replyer_2.copy()
+            prob_first = global_config.chat.replyer_random_probability
+
+            model_config_1["weight"] = prob_first
+            model_config_2["weight"] = 1.0 - prob_first
+
+            self.express_model_configs = [model_config_1, model_config_2]

         if not self.express_model_configs:
             logger.warning("No valid model config found; reply generation may fail.")
@@ -169,9 +178,6 @@
             cache_ttl=3
         )

-
-
-
     def _select_weighted_model_config(self) -> Dict[str, Any]:
         """Pick one model config using weighted random selection"""
         configs = self.express_model_configs
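The diff truncates _select_weighted_model_config right after fetching the config list. A plausible completion, assuming the "weight" keys set in __init__ (the standalone helper below is an illustration, not the project's code):

    # Sketch: weighted random pick over the "weight" keys set in __init__.
    import random
    from typing import Any, Dict, List

    def select_weighted(configs: List[Dict[str, Any]]) -> Dict[str, Any]:
        weights = [max(float(c.get("weight", 1.0)), 0.0) for c in configs]
        if sum(weights) <= 0:
            # Degenerate weights: fall back to a uniform choice.
            return random.choice(configs)
        return random.choices(configs, weights=weights, k=1)[0]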
@@ -214,7 +220,6 @@
         reply_data: Dict[str, Any] = None,
         reply_to: str = "",
         relation_info: str = "",
-        structured_info: str = "",
         extra_info: str = "",
         available_actions: List[str] = None,
     ) -> Tuple[bool, Optional[str]]:
@@ -231,7 +236,6 @@
         reply_data = {
             "reply_to": reply_to,
             "relation_info": relation_info,
-            "structured_info": structured_info,
             "extra_info": extra_info,
         }
         for key, value in reply_data.items():
@@ -514,8 +518,6 @@
         person_info_manager = get_person_info_manager()
         bot_person_id = person_info_manager.get_person_id("system", "bot_id")
         is_group_chat = bool(chat_stream.group_info)

-        structured_info = reply_data.get("structured_info", "")
-
         reply_to = reply_data.get("reply_to", "none")
         extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")
@@ -569,18 +571,15 @@

         keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)

-        if structured_info:
-            structured_info_block = (
-                f"Here is some additional information you know; read it and take it into account when deciding\n{structured_info}\nThat is all of the additional information."
+        if tool_info:
+            tool_info_block = (
+                f"Here is some additional information you know; read it and take it into account when deciding\n{tool_info}\nThat is all of the additional information."
             )
         else:
-            structured_info_block = ""
-
-        if tool_info:
-            tool_info_block = f"{tool_info}"
-        else:
             tool_info_block = ""

-
         if extra_info_block:
             extra_info_block = f"Here is information you should consult when replying; read it and take it into account when deciding\n{extra_info_block}\nThat is the information to consult when replying."
         else:
@@ -652,7 +651,6 @@
             chat_target=chat_target_1,
             chat_info=chat_talking_prompt,
             memory_block=memory_block,
-            structured_info_block=structured_info_block,
             tool_info_block=tool_info_block,
             extra_info_block=extra_info_block,
             relation_info_block=relation_info,
@@ -683,7 +681,6 @@
             chat_target=chat_target_1,
             chat_info=chat_talking_prompt,
             memory_block=memory_block,
-            structured_info_block=structured_info_block,
             tool_info_block=tool_info_block,
             relation_info_block=relation_info,
             extra_info_block=extra_info_block,
@@ -14,6 +14,7 @@ class ReplyerManager:
         self,
         chat_stream: Optional[ChatStream] = None,
         chat_id: Optional[str] = None,
+        enable_tool: bool = False,
         model_configs: Optional[List[Dict[str, Any]]] = None,
         request_type: str = "replyer",
     ) -> Optional[DefaultReplyer]:
@@ -49,6 +50,7 @@
         # model_configs only takes effect here, at initialization
         replyer = DefaultReplyer(
             chat_stream=target_stream,
+            enable_tool=enable_tool,
             model_configs=model_configs,  # may be None, in which case the default models are used
             request_type=request_type,
         )
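Because the manager hands back a cached replyer per chat, enable_tool and model_configs bind on first use only. A get-or-create sketch of that behavior (the cache dict and the stream_id attribute are assumptions, not the project's exact code):

    # Sketch of the cache-then-reuse pattern implied by the comment above.
    class ReplyerManagerSketch:
        def __init__(self):
            self._repliers = {}  # chat_id -> DefaultReplyer

        def get_replyer(self, target_stream, enable_tool=False,
                        model_configs=None, request_type="replyer"):
            chat_id = target_stream.stream_id  # hypothetical attribute
            if chat_id not in self._repliers:
                # Constructor kwargs only matter on this first call.
                self._repliers[chat_id] = DefaultReplyer(
                    chat_stream=target_stream,
                    enable_tool=enable_tool,
                    model_configs=model_configs,  # None -> default models
                    request_type=request_type,
                )
            return self._repliers[chat_id]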
@@ -30,11 +30,11 @@ from src.config.official_configs import (
     TelemetryConfig,
     ExperimentalConfig,
     ModelConfig,
-    FocusChatProcessorConfig,
     MessageReceiveConfig,
     MaimMessageConfig,
     LPMMKnowledgeConfig,
     RelationshipConfig,
+    ToolConfig,
 )

 install(extra_lines=3)
@@ -151,7 +151,6 @@ class Config(ConfigBase):
     message_receive: MessageReceiveConfig
     normal_chat: NormalChatConfig
     focus_chat: FocusChatConfig
-    focus_chat_processor: FocusChatProcessorConfig
     emoji: EmojiConfig
     expression: ExpressionConfig
     memory: MemoryConfig
@@ -165,7 +164,7 @@ class Config(ConfigBase):
     model: ModelConfig
     maim_message: MaimMessageConfig
     lpmm_knowledge: LPMMKnowledgeConfig
+    tool: ToolConfig


 def load_config(config_path: str) -> Config:
     """
@@ -78,6 +78,12 @@ class ChatConfig(ConfigBase):
     max_context_size: int = 18
     """Context length"""

+    replyer_random_probability: float = 0.5
+    """
+    Probability of picking the reasoning model when speaking (between 0 and 1)
+    The normal model is picked with probability 1 - replyer_random_probability
+    """
+
     talk_frequency: float = 1
     """Reply frequency threshold"""
@@ -264,12 +270,6 @@ class MessageReceiveConfig(ConfigBase):
 class NormalChatConfig(ConfigBase):
     """Normal chat configuration"""

-    normal_chat_first_probability: float = 0.3
-    """
-    Probability of picking the reasoning model when speaking (between 0 and 1)
-    The normal model is picked with probability 1 - normal_chat_first_probability
-    """
-
     message_buffer: bool = False
     """Message buffer"""
@@ -337,6 +337,15 @@ class ExpressionConfig(ConfigBase):
     Format: [["qq:12345:group", "qq:67890:private"]]
     """

+
+@dataclass
+class ToolConfig(ConfigBase):
+    """Tool configuration"""
+
+    enable_in_normal_chat: bool = False
+    """Whether to enable tools in normal chat"""
+
+    enable_in_focus_chat: bool = True
+    """Whether to enable tools in focus chat"""
+

 @dataclass
 class EmojiConfig(ConfigBase):
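The two flags map one chat mode to one switch. A minimal gating sketch, assuming a mode string as used elsewhere in the config ("normal" / "focus"); the helper name is hypothetical:

    # Sketch: pick the right tool switch for the active chat mode.
    def tools_enabled(tool_cfg, chat_mode: str) -> bool:
        if chat_mode == "focus":
            return tool_cfg.enable_in_focus_chat   # defaults to True
        return tool_cfg.enable_in_normal_chat      # defaults to False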
@@ -636,7 +645,7 @@ class ModelConfig(ConfigBase):
     focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
     """Focus working-memory model configuration"""

-    focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
+    tool_use: dict[str, Any] = field(default_factory=lambda: {})
     """Tool-use model configuration"""

     planner: dict[str, Any] = field(default_factory=lambda: {})
@@ -27,6 +27,7 @@ logger = get_logger("generator_api")
 def get_replyer(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
+    enable_tool: bool = False,
     model_configs: Optional[List[Dict[str, Any]]] = None,
     request_type: str = "replyer",
 ) -> Optional[DefaultReplyer]:
@@ -47,7 +48,11 @@
     try:
         logger.debug(f"[GeneratorAPI] Getting replyer, chat_id: {chat_id}, chat_stream: {'yes' if chat_stream else 'no'}")
         return replyer_manager.get_replyer(
-            chat_stream=chat_stream, chat_id=chat_id, model_configs=model_configs, request_type=request_type
+            chat_stream=chat_stream,
+            chat_id=chat_id,
+            model_configs=model_configs,
+            request_type=request_type,
+            enable_tool=enable_tool,
         )
     except Exception as e:
         logger.error(f"[GeneratorAPI] Unexpected error while getting replyer: {e}", exc_info=True)
@@ -66,9 +71,9 @@ async def generate_reply(
     action_data: Dict[str, Any] = None,
     reply_to: str = "",
     relation_info: str = "",
-    structured_info: str = "",
     extra_info: str = "",
     available_actions: List[str] = None,
+    enable_tool: bool = False,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
     return_prompt: bool = False,
@@ -89,7 +94,7 @@
     """
     try:
         # Get the replyer
-        replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type)
+        replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type, enable_tool=enable_tool)
         if not replyer:
             logger.error("[GeneratorAPI] Could not get a replyer")
             return False, []
@@ -101,7 +106,6 @@
             reply_data=action_data or {},
             reply_to=reply_to,
             relation_info=relation_info,
-            structured_info=structured_info,
             extra_info=extra_info,
             available_actions=available_actions,
         )
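End to end, a caller now threads enable_tool through generate_reply and on into DefaultReplyer. A hedged usage sketch (chat_stream, generator_api, global_config, and logger are assumed to be in scope, as in the hunks above):

    # Sketch: one reply round with tool use gated by the new config flag.
    async def reply_once(chat_stream, reply_to_str: str):
        success, reply_set, prompt = await generator_api.generate_reply(
            chat_stream=chat_stream,
            reply_to=reply_to_str,
            available_actions=[],
            enable_tool=global_config.tool.enable_in_normal_chat,
            request_type="normal.replyer",
            return_prompt=True,
        )
        if not success:
            logger.warning("reply generation failed")
        return reply_set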
@@ -10,8 +10,7 @@
     "license": "GPL-v3.0-or-later",

     "host_application": {
-        "min_version": "0.8.0",
-        "max_version": "0.8.10"
+        "min_version": "0.8.0"
     },
     "homepage_url": "https://github.com/MaiM-with-u/maibot",
     "repository_url": "https://github.com/MaiM-with-u/maibot",
@@ -63,6 +63,7 @@ class ReplyAction(BaseAction):
             action_data=self.action_data,
             chat_id=self.chat_id,
             request_type="focus.replyer",
+            enable_tool=global_config.tool.enable_in_focus_chat,
         )

         # Check the number of new messages since start_time
@@ -2,7 +2,6 @@ from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 import time
 from src.common.logger import get_logger
 from src.individuality.individuality import get_individuality
 from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.tools.tool_use import ToolUser
 from src.chat.utils.json_utils import process_llm_tool_calls
@@ -45,7 +44,7 @@ class ToolExecutor:
         self.chat_id = chat_id
         self.log_prefix = f"[ToolExecutor:{self.chat_id}] "
         self.llm_model = LLMRequest(
-            model=global_config.model.focus_tool_use,
+            model=global_config.model.tool_use,
             request_type="tool_executor",
         )
@@ -1,5 +1,5 @@
 [inner]
-version = "2.30.0"
+version = "3.0.0"

 #---- The section below is for developers; if you have only deployed 麦麦, you do not need to read it ----
 #If you want to modify the config file, update the version value after making your changes
@@ -66,6 +66,8 @@ chat_mode = "normal" # chat mode; normal mode: normal, focus mode

 max_context_size = 18 # context length

+replyer_random_probability = 0.5 # probability that the primary replyer model is chosen
+
 talk_frequency = 1 # 麦麦's reply frequency; higher means 麦麦 replies more often

 time_based_talk_frequency = ["8:00,1", "12:00,1.5", "18:00,2", "01:00,0.5"]
@@ -113,7 +115,7 @@ ban_msgs_regex = [

 [normal_chat] # normal chat
 # general reply parameters
-normal_chat_first_probability = 0.5 # probability that 麦麦 picks the primary model when answering (the secondary model gets 1 - normal_chat_first_probability)
+replyer_random_probability = 0.5 # probability that 麦麦 picks the primary model when answering (the secondary model gets 1 - replyer_random_probability)
 emoji_chance = 0.2 # probability that 麦麦 attaches a sticker to a normal reply; set to 1 to let 麦麦 decide on its own
 thinking_timeout = 120 # 麦麦's maximum thinking time; thinking that runs past this is abandoned (usually the API is too slow)
@@ -135,6 +137,10 @@ compressed_length = 8 # must not exceed observation_context_size; heart-flow context compression
 compress_length_limit = 4 # maximum number of compressed chunks; compressed context beyond this is deleted
 working_memory_processor = false # whether to enable the working-memory processor (it is resource-hungry)

+[tool]
+enable_in_normal_chat = false # whether to enable tools in normal chat
+enable_in_focus_chat = true # whether to enable tools in focus chat
+
 [emoji]
 max_reg_num = 60 # maximum number of registered stickers
 do_replace = true # when the maximum is reached, delete (replace) stickers; if off, stop collecting new stickers at the maximum
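For a quick sanity check of the new table, the standard library's tomllib (Python 3.11+) can read it directly; the file name below is the usual template name and may differ in a given deployment:

    # Illustrative check of the new [tool] table; not the project's loader.
    import tomllib

    with open("bot_config.toml", "rb") as f:  # hypothetical path
        cfg = tomllib.load(f)

    tool = cfg.get("tool", {})
    print(tool.get("enable_in_normal_chat", False))  # -> False
    print(tool.get("enable_in_focus_chat", True))    # -> True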
@@ -265,7 +271,7 @@ pri_out = 8 # model output price (optional; used to track spend)
 # default temp 0.2; if you are using the old V3 or another model, adjust temp yourself
 temp = 0.2 # model temperature; 0.1-0.3 recommended for the new V3

-[model.replyer_2] # secondary replyer model for normal chat mode
+[model.replyer_2] # secondary replyer model
 name = "Pro/deepseek-ai/DeepSeek-R1"
 provider = "SILICONFLOW"
 pri_in = 4.0 # model input price (optional; used to track spend)
@@ -302,6 +308,13 @@ pri_out = 2.8
 temp = 0.7
 enable_thinking = false # whether to enable thinking

+[model.tool_use] # tool-calling model; must support tool calls
+name = "Qwen/Qwen3-14B"
+provider = "SILICONFLOW"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false # whether to enable thinking (qwen3 only)

 # embedding model
 [model.embedding]
@@ -321,15 +334,6 @@ pri_out = 2.8
 temp = 0.7

-
-[model.focus_tool_use] # tool-calling model; must support tool calls
-name = "Qwen/Qwen3-14B"
-provider = "SILICONFLOW"
-pri_in = 0.5
-pri_out = 2
-temp = 0.7
-enable_thinking = false # whether to enable thinking (qwen3 only)
-

 #------------LPMM knowledge-base models------------

 [model.lpmm_entity_extract] # entity extraction model