config:修改配置项

This commit is contained in:
SengokuCola
2025-07-01 14:59:00 +08:00
parent cec854cba2
commit 4dd04d4fb0
7 changed files with 23 additions and 41 deletions

View File

@@ -582,7 +582,7 @@ class HeartFChatting:
async def run_with_timeout(proc=processor):
return await asyncio.wait_for(
proc.process_info(observations=observations),
timeout=global_config.focus_chat.processor_max_time,
30
)
task = asyncio.create_task(run_with_timeout())
@@ -613,9 +613,9 @@ class HeartFChatting:
processor_time_costs[processor_name] = duration_since_parallel_start
except asyncio.TimeoutError:
logger.info(
f"{self.log_prefix} 处理器 {processor_name} 超时(>{global_config.focus_chat.processor_max_time}s已跳过"
f"{self.log_prefix} 处理器 {processor_name} 超时(>30s已跳过"
)
processor_time_costs[processor_name] = global_config.focus_chat.processor_max_time
processor_time_costs[processor_name] = 30
except Exception as e:
logger.error(
f"{self.log_prefix} 处理器 {processor_name} 执行失败,耗时 (自并行开始): {duration_since_parallel_start:.2f}秒. 错误: {e}",
@@ -672,7 +672,7 @@ class HeartFChatting:
try:
result = await asyncio.wait_for(
proc.process_info(observations=observations, action_type=action_type, action_data=action_data),
timeout=global_config.focus_chat.processor_max_time,
30
)
end_time = time.time()
post_processor_time_costs[name] = end_time - start_time
@@ -721,7 +721,7 @@ class HeartFChatting:
if task_type == "processor":
post_processor_time_costs[task_name] = elapsed_time
logger.warning(
f"{self.log_prefix} 后期处理器 {task_name} 超时(>{global_config.focus_chat.processor_max_time}s已跳过耗时: {elapsed_time:.3f}"
f"{self.log_prefix} 后期处理器 {task_name} 超时(>30s已跳过耗时: {elapsed_time:.3f}"
)
except Exception as e:
# 对于异常任务,记录已用时间

View File

@@ -67,7 +67,7 @@ class ChattingObservation(Observation):
self.talking_message_str_truncate_short = ""
self.name = global_config.bot.nickname
self.nick_name = global_config.bot.alias_names
self.max_now_obs_len = global_config.focus_chat.observation_context_size
self.max_now_obs_len = global_config.chat.max_context_size
self.overlap_len = global_config.focus_chat.compressed_length
self.person_list = []
self.compressor_prompt = ""

View File

@@ -80,7 +80,7 @@ class NormalChatActionModifier:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size, # 使用相同的配置
limit=global_config.chat.max_context_size, # 使用相同的配置
)
# 构建可读的聊天上下文

View File

@@ -122,7 +122,7 @@ class NormalChatPlanner:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=message.chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size,
limit=global_config.chat.max_context_size,
)
chat_context = build_readable_messages(

View File

@@ -350,7 +350,7 @@ class DefaultReplyer:
# 使用从处理器传来的选中表达方式
# LLM模式调用LLM选择5-10个然后随机选5个
selected_expressions = await expression_selector.select_suitable_expressions_llm(
self.chat_stream.stream_id, chat_history, max_num=12, min_num=2, target_message=target
self.chat_stream.stream_id, chat_history, max_num=8, min_num=2, target_message=target
)
if selected_expressions:
@@ -476,7 +476,7 @@ class DefaultReplyer:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size,
limit=global_config.chat.max_context_size,
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
@@ -491,7 +491,7 @@ class DefaultReplyer:
message_list_before_now_half = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_id,
timestamp=time.time(),
limit=int(global_config.focus_chat.observation_context_size * 0.5),
limit=int(global_config.chat.max_context_size * 0.5),
)
chat_talking_prompt_half = build_readable_messages(
message_list_before_now_half,
@@ -654,7 +654,7 @@ class DefaultReplyer:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.focus_chat.observation_context_size,
limit=global_config.chat.max_context_size,
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,

View File

@@ -75,6 +75,9 @@ class ChatConfig(ConfigBase):
chat_mode: str = "normal"
"""聊天模式"""
max_context_size: int = 18
"""上下文长度"""
talk_frequency: float = 1
"""回复频率阈值"""
@@ -267,9 +270,6 @@ class NormalChatConfig(ConfigBase):
选择普通模型的概率为 1 - reasoning_normal_model_probability
"""
max_context_size: int = 15
"""上下文长度"""
message_buffer: bool = False
"""消息缓冲器"""
@@ -302,9 +302,6 @@ class NormalChatConfig(ConfigBase):
class FocusChatConfig(ConfigBase):
"""专注聊天配置类"""
observation_context_size: int = 20
"""可观察到的最长上下文大小,超过这个值的上下文会被压缩"""
compressed_length: int = 5
"""心流上下文压缩的最短压缩长度超过心流观察到的上下文长度会压缩最短压缩长度为5"""
@@ -317,34 +314,18 @@ class FocusChatConfig(ConfigBase):
consecutive_replies: float = 1
"""连续回复能力,值越高,麦麦连续回复的概率越高"""
parallel_processing: bool = False
"""是否允许处理器阶段和回忆阶段并行执行"""
processor_max_time: int = 25
"""处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止"""
@dataclass
class FocusChatProcessorConfig(ConfigBase):
"""专注聊天处理器配置类"""
person_impression_processor: bool = True
"""是否启用关系识别处理器(已废弃,为了兼容性保留)"""
relationship_build_processor: bool = True
"""是否启用关系构建处理器"""
real_time_info_processor: bool = True
"""是否启用实时信息提取处理器"""
tool_use_processor: bool = True
"""是否启用工具使用处理器"""
working_memory_processor: bool = True
"""是否启用工作记忆处理器"""
expression_selector_processor: bool = True
"""是否启用表达方式选择处理器"""
@dataclass
@@ -444,6 +425,9 @@ class MemoryConfig(ConfigBase):
class MoodConfig(ConfigBase):
"""情绪配置类"""
enable_mood: bool = False
"""是否启用情绪系统"""
mood_update_interval: int = 1
"""情绪更新间隔(秒)"""

View File

@@ -1,5 +1,5 @@
[inner]
version = "2.28.0"
version = "2.29.0"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请在修改后将version的值进行变更
@@ -64,6 +64,8 @@ chat_mode = "normal" # 聊天模式 —— 普通模式normal专注模式
# chat_mode = "focus"
# chat_mode = "auto"
max_context_size = 18 # 上下文长度
talk_frequency = 1 # 麦麦回复频率,越高,麦麦回复越频繁
time_based_talk_frequency = ["8:00,1", "12:00,1.5", "18:00,2", "01:00,0.5"]
@@ -112,7 +114,6 @@ ban_msgs_regex = [
[normal_chat] #普通聊天
#一般回复参数
normal_chat_first_probability = 0.5 # 麦麦回答时选择首要模型的概率与之相对的次要模型的概率为1 - normal_chat_first_probability
max_context_size = 15 #上下文长度
emoji_chance = 0.2 # 麦麦一般回复时使用表情包的概率设置为1让麦麦自己决定发不发
thinking_timeout = 120 # 麦麦最长思考时间超过这个时间的思考会放弃往往是api反应太慢
@@ -124,22 +125,18 @@ emoji_response_penalty = 0 # 对其他人发的表情包回复惩罚系数,设
mentioned_bot_inevitable_reply = true # 提及 bot 必然回复
at_bot_inevitable_reply = true # @bot 必然回复(包含提及)
enable_planner = false # 是否启用动作规划器(实验性功能,与focus_chat共享actions)
enable_planner = false # 是否启用动作规划器(与focus_chat共享actions)
[focus_chat] #专注聊天
think_interval = 3 # 思考间隔 单位秒,可以有效减少消耗
consecutive_replies = 1 # 连续回复能力,值越高,麦麦连续回复的概率越高
processor_max_time = 20 # 处理器最大时间,单位秒,如果超过这个时间,处理器会自动停止
observation_context_size = 20 # 观察到的最长上下文大小
compressed_length = 8 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度会压缩,最短压缩长度为5
compress_length_limit = 4 #最多压缩份数,超过该数值的压缩上下文会被删除
[focus_chat_processor] # 专注聊天处理器打开可以实现更多功能但是会增加token消耗
person_impression_processor = true # 是否启用关系识别处理器
tool_use_processor = false # 是否启用工具使用处理器
working_memory_processor = false # 是否启用工作记忆处理器,消耗量大
expression_selector_processor = true # 是否启用表达方式选择处理器
[emoji]
max_reg_num = 60 # 表情包最大注册数量
@@ -169,6 +166,7 @@ consolidation_check_percentage = 0.05 # 检查节点比例
memory_ban_words = [ "表情包", "图片", "回复", "聊天记录" ]
[mood] # 仅在 普通聊天 有效
enable_mood = false # 是否启用情绪系统
mood_update_interval = 1.0 # 情绪更新间隔 单位秒
mood_decay_rate = 0.95 # 情绪衰减率
mood_intensity_factor = 1.0 # 情绪强度因子