fix:修改模型名称,移除chat_mind处理器

This commit is contained in:
SengokuCola
2025-06-03 19:43:54 +08:00
parent 51f5e610d7
commit f94f14cce2
9 changed files with 25 additions and 48 deletions

View File

@@ -429,11 +429,9 @@ class ConfigEditor:
# "model.normal_chat_1": "普通聊天:主要聊天模型", # "model.normal_chat_1": "普通聊天:主要聊天模型",
# "model.normal_chat_2": "普通聊天:次要聊天模型", # "model.normal_chat_2": "普通聊天:次要聊天模型",
# "model.focus_working_memory": "专注模式:工作记忆模型", # "model.focus_working_memory": "专注模式:工作记忆模型",
# "model.focus_chat_mind": "专注模式:聊天思考模型",
# "model.focus_tool_use": "专注模式:工具调用模型", # "model.focus_tool_use": "专注模式:工具调用模型",
# "model.focus_planner": "专注模式:决策模型", # "model.focus_planner": "专注模式:决策模型",
# "model.focus_expressor": "专注模式:表达器模型", # "model.focus_expressor": "专注模式:表达器模型",
# "model.focus_self_recognize": "专注模式:自我识别模型"
# } # }
# 获取当前节的名称 # 获取当前节的名称
# current_section = ".".join(path[:-1]) # 去掉最后一个key # current_section = ".".join(path[:-1]) # 去掉最后一个key
@@ -494,11 +492,9 @@ class ConfigEditor:
"model.normal_chat_1": "主要聊天模型", "model.normal_chat_1": "主要聊天模型",
"model.normal_chat_2": "次要聊天模型", "model.normal_chat_2": "次要聊天模型",
"model.focus_working_memory": "工作记忆模型", "model.focus_working_memory": "工作记忆模型",
"model.focus_chat_mind": "聊天规划模型",
"model.focus_tool_use": "工具调用模型", "model.focus_tool_use": "工具调用模型",
"model.focus_planner": "决策模型", "model.focus_planner": "决策模型",
"model.focus_expressor": "表达器模型", "model.focus_expressor": "表达器模型",
"model.focus_self_recognize": "自我识别模型",
} }
section_trans = self.translations.get("sections", {}).get(full_section_path, {}) section_trans = self.translations.get("sections", {}).get(full_section_path, {})
section_name = section_trans.get("name") or section_translations.get(full_section_path) or section section_name = section_trans.get("name") or section_translations.get(full_section_path) or section

View File

@@ -68,8 +68,7 @@ class MindProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.focus_chat_mind, model=global_config.model.planner,
# temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800, max_tokens=800,
request_type="focus.processor.chat_mind", request_type="focus.processor.chat_mind",
) )

View File

@@ -49,8 +49,7 @@ class RelationshipProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.focus_self_recognize, model=global_config.model.relation,
temperature=global_config.model.focus_self_recognize["temp"],
max_tokens=800, max_tokens=800,
request_type="focus.processor.self_identify", request_type="focus.processor.self_identify",
) )

View File

@@ -51,8 +51,7 @@ class SelfProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.focus_self_recognize, model=global_config.model.relation,
temperature=global_config.model.focus_self_recognize["temp"],
max_tokens=800, max_tokens=800,
request_type="focus.processor.self_identify", request_type="focus.processor.self_identify",
) )

View File

@@ -60,8 +60,7 @@ class WorkingMemoryProcessor(BaseProcessor):
self.subheartflow_id = subheartflow_id self.subheartflow_id = subheartflow_id
self.llm_model = LLMRequest( self.llm_model = LLMRequest(
model=global_config.model.focus_chat_mind, model=global_config.model.planner,
temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800, max_tokens=800,
request_type="focus.processor.working_memory", request_type="focus.processor.working_memory",
) )

View File

@@ -165,7 +165,7 @@ class FocusChatConfig(ConfigBase):
class FocusChatProcessorConfig(ConfigBase): class FocusChatProcessorConfig(ConfigBase):
"""专注聊天处理器配置类""" """专注聊天处理器配置类"""
mind_processor: bool = True mind_processor: bool = False
"""是否启用思维处理器""" """是否启用思维处理器"""
self_identify_processor: bool = True self_identify_processor: bool = True
@@ -180,9 +180,6 @@ class FocusChatProcessorConfig(ConfigBase):
working_memory_processor: bool = True working_memory_processor: bool = True
"""是否启用工作记忆处理器""" """是否启用工作记忆处理器"""
lite_chat_mind_processor: bool = False
"""是否启用轻量级聊天思维处理器可以节省token消耗和时间"""
@dataclass @dataclass
class ExpressionConfig(ConfigBase): class ExpressionConfig(ConfigBase):
@@ -445,11 +442,6 @@ class ModelConfig(ConfigBase):
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {}) focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""专注工作记忆模型配置""" """专注工作记忆模型配置"""
focus_chat_mind: dict[str, Any] = field(default_factory=lambda: {})
"""专注聊天规划模型配置"""
focus_self_recognize: dict[str, Any] = field(default_factory=lambda: {})
"""专注自我识别模型配置"""
focus_tool_use: dict[str, Any] = field(default_factory=lambda: {}) focus_tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""专注工具使用模型配置""" """专注工具使用模型配置"""
@@ -457,6 +449,9 @@ class ModelConfig(ConfigBase):
planner: dict[str, Any] = field(default_factory=lambda: {}) planner: dict[str, Any] = field(default_factory=lambda: {})
"""规划模型配置""" """规划模型配置"""
relation: dict[str, Any] = field(default_factory=lambda: {})
"""关系模型配置"""
focus_expressor: dict[str, Any] = field(default_factory=lambda: {}) focus_expressor: dict[str, Any] = field(default_factory=lambda: {})
"""专注表达器模型配置""" """专注表达器模型配置"""

View File

@@ -17,8 +17,8 @@ class ImpressionUpdateTask(AsyncTask):
def __init__(self): def __init__(self):
super().__init__( super().__init__(
task_name="impression_update", task_name="impression_update",
wait_before_start=10, # 启动后等待10秒 wait_before_start=2, # 启动后等待2秒
run_interval=30 # 每30秒运行一次 run_interval=20 # 每20秒运行一次
) )
async def run(self): async def run(self):
@@ -27,7 +27,7 @@ class ImpressionUpdateTask(AsyncTask):
# 获取最近10分钟的消息 # 获取最近10分钟的消息
current_time = int(time.time()) current_time = int(time.time())
start_time = current_time - 600 # 10分钟前 start_time = current_time - 6000 # 100分钟前
logger.debug(f"获取时间范围: {start_time} -> {current_time}") logger.debug(f"获取时间范围: {start_time} -> {current_time}")
# 获取所有消息 # 获取所有消息

View File

@@ -320,19 +320,19 @@ class RelationshipManager:
messages_before = get_raw_msg_by_timestamp_with_chat( messages_before = get_raw_msg_by_timestamp_with_chat(
chat_id=chat_id, chat_id=chat_id,
timestamp_start=timestamp - 600, # 前10分钟 timestamp_start=timestamp - 6000, # 前100分钟
timestamp_end=timestamp, timestamp_end=timestamp,
# person_ids=[user_id], # person_ids=[user_id],
limit=200, limit=100,
limit_mode="latest" limit_mode="latest"
) )
messages_after = get_raw_msg_by_timestamp_with_chat( messages_after = get_raw_msg_by_timestamp_with_chat(
chat_id=chat_id, chat_id=chat_id,
timestamp_start=timestamp, timestamp_start=timestamp,
timestamp_end=timestamp + 600, # 后10分钟 timestamp_end=timestamp + 6000, # 后100分钟
# person_ids=[user_id], # person_ids=[user_id],
limit=200, limit=100,
limit_mode="earliest" limit_mode="earliest"
) )

View File

@@ -102,14 +102,15 @@ observation_context_size = 20 # 观察到的最长上下文大小
compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5 compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5
compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除 compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除
# 不建议更改
planner_type = "simple" # 规划器类型可选值complex复杂规划器, simple简单规划器 planner_type = "simple" # 规划器类型可选值complex复杂规划器, simple简单规划器
[focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗 [focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗
mind_processor = false # 是否启用思维处理器
self_identify_processor = true # 是否启用自我识别处理器 self_identify_processor = true # 是否启用自我识别处理器
relation_processor = true # 是否启用关系识别处理器 relation_processor = true # 是否启用关系识别处理器
tool_use_processor = false # 是否启用工具使用处理器 tool_use_processor = false # 是否启用工具使用处理器
working_memory_processor = false # 是否启用工作记忆处理器,不稳定,消耗量大 mind_processor = false # 是否启用思维处理器
working_memory_processor = false # 是否启用工作记忆处理器,消耗量大
[emoji] [emoji]
max_reg_num = 60 # 表情包最大注册数量 max_reg_num = 60 # 表情包最大注册数量
@@ -220,6 +221,13 @@ pri_in = 2
pri_out = 8 pri_out = 8
temp = 0.3 temp = 0.3
[model.relation] #用于处理和麦麦和其他人的关系
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.3
#嵌入模型 #嵌入模型
[model.embedding] [model.embedding]
@@ -255,14 +263,6 @@ pri_in = 0.7
pri_out = 2.8 pri_out = 2.8
temp = 0.7 temp = 0.7
[model.focus_chat_mind] #聊天规划:认真聊天时,生成麦麦对聊天的规划想法
name = "Pro/deepseek-ai/DeepSeek-V3"
# name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
# enable_thinking = false # 是否启用思考
pri_in = 2
pri_out = 8
temp = 0.3
[model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型 [model.focus_tool_use] #工具调用模型,需要使用支持工具调用的模型
name = "Qwen/Qwen3-14B" name = "Qwen/Qwen3-14B"
@@ -283,16 +283,6 @@ pri_in = 2
pri_out = 8 pri_out = 8
temp = 0.3 temp = 0.3
#自我识别模型,用于自我认知和身份识别
[model.focus_self_recognize]
# name = "Pro/deepseek-ai/DeepSeek-V3"
name = "Qwen/Qwen3-30B-A3B"
provider = "SILICONFLOW"
pri_in = 0.7
pri_out = 2.8
temp = 0.7
enable_thinking = false # 是否启用思考(qwen3 only)
[maim_message] [maim_message]