diff --git a/src/chat/normal_chat/normal_chat_generator.py b/src/chat/normal_chat/normal_chat_generator.py
index 28df6f189..5d17d22ab 100644
--- a/src/chat/normal_chat/normal_chat_generator.py
+++ b/src/chat/normal_chat/normal_chat_generator.py
@@ -21,13 +21,13 @@ class NormalChatGenerator:
             model=global_config.model.normal_chat_1,
             # temperature=0.7,
             max_tokens=3000,
-            request_type="normal_chat_1",
+            request_type="normal.chat_1",
         )
         self.model_normal = LLMRequest(
             model=global_config.model.normal_chat_2,
             # temperature=global_config.model.normal_chat_2["temp"],
             max_tokens=256,
-            request_type="normal_chat_2",
+            request_type="normal.chat_2",
         )
 
         self.model_sum = LLMRequest(
diff --git a/template/bot_config_template.toml b/template/bot_config_template.toml
index 0ca1e40ac..1908f5ae9 100644
--- a/template/bot_config_template.toml
+++ b/template/bot_config_template.toml
@@ -100,7 +100,7 @@ compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下
 [focus_chat_processor] # 专注聊天处理器,打开可以实现更多功能,但是会增加token消耗
 self_identify_processor = true # 是否启用自我识别处理器
 tool_use_processor = false # 是否启用工具使用处理器
-working_memory_processor = false # 是否启用工作记忆处理器
+working_memory_processor = false # 是否启用工作记忆处理器,不稳定,消耗量大
 
 [emoji]
 max_reg_num = 40 # 表情包最大注册数量