From c672f198edafd2b43378cecd18fe464e348848cd Mon Sep 17 00:00:00 2001
From: tt-P607 <68868379+tt-P607@users.noreply.github.com>
Date: Sat, 1 Nov 2025 19:00:59 +0800
Subject: [PATCH 1/2] fix(core): optimize the shutdown flow so the database is closed last
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Move the database service shutdown until after all other cleanup tasks have
finished, so that components shutting down no longer raise exceptions because
the database has already become unreachable.

Also wrap the database close in a timeout, making the overall shutdown more
robust.

- chore(config): upgrade the default models in the template config from
  DeepSeek-V3.1 to DeepSeek-V3.2-Exp for better out-of-the-box performance.
---
 src/main.py                         | 20 ++++++++++++--------
 template/model_config_template.toml | 30 +++++++++++++---------------
 2 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/src/main.py b/src/main.py
index 09e8d974c..1ac6f8e51 100644
--- a/src/main.py
+++ b/src/main.py
@@ -218,14 +218,6 @@ class MainSystem:
 
         cleanup_tasks = []
 
-        # 停止数据库服务
-        try:
-            from src.common.database.core import close_engine as stop_database
-
-            cleanup_tasks.append(("数据库服务", stop_database()))
-        except Exception as e:
-            logger.error(f"准备停止数据库服务时出错: {e}")
-
         # 停止消息批处理器
         try:
             from src.chat.message_receive.storage import get_message_storage_batcher, get_message_update_batcher
@@ -329,6 +321,18 @@ class MainSystem:
         else:
             logger.warning("没有需要清理的任务")
 
+        # 停止数据库服务 (在所有其他任务完成后最后停止)
+        try:
+            from src.common.database.core import close_engine as stop_database
+
+            logger.info("正在停止数据库服务...")
+            await asyncio.wait_for(stop_database(), timeout=15.0)
+            logger.info("🛑 数据库服务已停止")
+        except asyncio.TimeoutError:
+            logger.error("停止数据库服务超时")
+        except Exception as e:
+            logger.error(f"停止数据库服务时出错: {e}")
+
     def _cleanup(self) -> None:
         """同步清理资源(向后兼容)"""
         try:
diff --git a/template/model_config_template.toml b/template/model_config_template.toml
index 69e992a96..34b4a9595 100644
--- a/template/model_config_template.toml
+++ b/template/model_config_template.toml
@@ -1,5 +1,5 @@
 [inner]
-version = "1.3.6"
+version = "1.3.7"
 
 # 配置文件版本号迭代规则同bot_config.toml
 
@@ -53,8 +53,8 @@ price_out = 8.0 # 输出价格(用于API调用统计,单
 #use_anti_truncation = true # [可选] 启用反截断功能。当模型输出不完整时,系统会自动重试。建议只为有需要的模型(如Gemini)开启。
 
 [[models]]
-model_identifier = "deepseek-ai/DeepSeek-V3.1-Terminus"
-name = "siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"
+model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"
+name = "siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"
 api_provider = "SiliconFlow"
 price_in = 2.0
 price_out = 8.0
@@ -122,7 +122,7 @@ price_in = 4.0
 price_out = 16.0
 
 [model_task_config.utils] # 在麦麦的一些组件中使用的模型,例如表情包模块,取名模块,关系模块,是麦麦必须的模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] # 使用的模型列表,每个子项对应上面的模型名称(name)
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] # 使用的模型列表,每个子项对应上面的模型名称(name)
 temperature = 0.2 # 模型温度,新V3建议0.1-0.3
 max_tokens = 800 # 最大输出token数
 #concurrency_count = 2 # 并发请求数量,默认为1(不并发),设置为2或更高启用并发
@@ -133,28 +133,28 @@ temperature = 0.7
 max_tokens = 800
 
 [model_task_config.replyer] # 首要回复模型,还用于表达器和表达方式学习
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.2 # 模型温度,新V3建议0.1-0.3
 max_tokens = 800
 
 [model_task_config.planner] #决策:负责决定麦麦该做什么的模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.3
 max_tokens = 800
 
 [model_task_config.emotion] #负责麦麦的情绪变化
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.3
 max_tokens = 800
 
 [model_task_config.mood] #负责麦麦的心情变化
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.3
 max_tokens = 800
 
 [model_task_config.maizone] # maizone模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.7
 max_tokens = 800
 
@@ -181,22 +181,22 @@ temperature = 0.7
 max_tokens = 800
 
 [model_task_config.schedule_generator]#日程表生成模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.7
 max_tokens = 1000
 
 [model_task_config.anti_injection] # 反注入检测专用模型
-model_list = ["moonshotai-Kimi-K2-Instruct"] # 使用快速的小模型进行检测
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] # 使用快速的小模型进行检测
 temperature = 0.1 # 低温度确保检测结果稳定
 max_tokens = 200 # 检测结果不需要太长的输出
 
 [model_task_config.monthly_plan_generator] # 月层计划生成模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.7
 max_tokens = 1000
 
 [model_task_config.relationship_tracker] # 用户关系追踪模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.7
 max_tokens = 1000
 
@@ -210,12 +210,12 @@ embedding_dimension = 1024
 
 #------------LPMM知识库模型------------
 [model_task_config.lpmm_entity_extract] # 实体提取模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.2
 max_tokens = 800
 
 [model_task_config.lpmm_rdf_build] # RDF构建模型
-model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"]
+model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"]
 temperature = 0.2
 max_tokens = 800
 
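For reference, a minimal standalone sketch (not part of the patch above) of the
shutdown ordering it introduces: every other cleanup task is awaited first, and
only then is the database closed, bounded by a timeout. Only close_engine
(imported as stop_database) and the 15-second timeout come from the patch; the
stop_database stub, the shutdown/main helpers, and the example cleanup task are
illustrative assumptions.

import asyncio


async def stop_database() -> None:
    # Illustrative stand-in for src.common.database.core.close_engine.
    await asyncio.sleep(0.1)


async def shutdown(cleanup_tasks):
    # 1) Run every other cleanup task first, so nothing loses database access
    #    while it is still flushing data.
    if cleanup_tasks:
        coros = [coro for _, coro in cleanup_tasks]
        results = await asyncio.gather(*coros, return_exceptions=True)
        for (name, _), result in zip(cleanup_tasks, results):
            if isinstance(result, Exception):
                print(f"cleanup of {name} failed: {result}")

    # 2) Close the database last, with a timeout so shutdown cannot hang here.
    try:
        await asyncio.wait_for(stop_database(), timeout=15.0)
    except asyncio.TimeoutError:
        print("database shutdown timed out")


async def main():
    cleanup_tasks = [("message batcher", asyncio.sleep(0.05))]  # hypothetical cleanup task
    await shutdown(cleanup_tasks)


asyncio.run(main())

Closing the engine last means any task that still needs to write can finish
normally, and the timeout keeps the process from hanging if disposal stalls.
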
["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.3 max_tokens = 800 [model_task_config.mood] #负责麦麦的心情变化 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.3 max_tokens = 800 [model_task_config.maizone] # maizone模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.7 max_tokens = 800 @@ -181,22 +181,22 @@ temperature = 0.7 max_tokens = 800 [model_task_config.schedule_generator]#日程表生成模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.7 max_tokens = 1000 [model_task_config.anti_injection] # 反注入检测专用模型 -model_list = ["moonshotai-Kimi-K2-Instruct"] # 使用快速的小模型进行检测 +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] # 使用快速的小模型进行检测 temperature = 0.1 # 低温度确保检测结果稳定 max_tokens = 200 # 检测结果不需要太长的输出 [model_task_config.monthly_plan_generator] # 月层计划生成模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.7 max_tokens = 1000 [model_task_config.relationship_tracker] # 用户关系追踪模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.7 max_tokens = 1000 @@ -210,12 +210,12 @@ embedding_dimension = 1024 #------------LPMM知识库模型------------ [model_task_config.lpmm_entity_extract] # 实体提取模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.2 max_tokens = 800 [model_task_config.lpmm_rdf_build] # RDF构建模型 -model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.1-Terminus"] +model_list = ["siliconflow-deepseek-ai/DeepSeek-V3.2-Exp"] temperature = 0.2 max_tokens = 800 From 5f384da4894799107cf177c49f336ce8944a8abb Mon Sep 17 00:00:00 2001 From: minecraft1024a Date: Sat, 1 Nov 2025 19:31:34 +0800 Subject: [PATCH 2/2] =?UTF-8?q?refactor(chat):=20=E7=AE=80=E5=8C=96?= =?UTF-8?q?=E6=97=A5=E7=A8=8B=E7=8A=B6=E6=80=81=E6=8F=90=E7=A4=BA=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 删除了在日程活动时间范围之外的冗余状态判断。现在,无论当前时间是否在活动时间段内,都会统一计算并展示活动的开始、结束、已进行和剩余时间,简化了代码逻辑并确保了信息展示的一致性。反正LLM自己会判断的,不需要咱操心那么多啦。 --- src/chat/replyer/default_generator.py | 9 +++------ src/llm_models/model_client/openai_client.py | 4 ++-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/chat/replyer/default_generator.py b/src/chat/replyer/default_generator.py index 106295ca9..5b214aade 100644 --- a/src/chat/replyer/default_generator.py +++ b/src/chat/replyer/default_generator.py @@ -1468,18 +1468,15 @@ class DefaultReplyer: if now < start_time: now += timedelta(days=1) - if start_time <= now < end_time: - duration_minutes = (now - start_time).total_seconds() / 60 - remaining_minutes = (end_time - now).total_seconds() / 60 - schedule_block = ( + duration_minutes = (now - start_time).total_seconds() / 60 + remaining_minutes = (end_time - now).total_seconds() / 60 + schedule_block = ( f"你当前正在进行“{activity}”," f"计划时间从{start_time.strftime('%H:%M')}到{end_time.strftime('%H:%M')}。" f"这项活动已经开始了{duration_minutes:.0f}分钟," f"预计还有{remaining_minutes:.0f}分钟结束。" "(此为你的当前状态,仅供参考。除非被直接询问,否则不要在对话中主动提及。)" ) - else: - schedule_block = f"你当前正在进行“{activity}”。(此为你的当前状态,仅供参考。除非被直接询问,否则不要在对话中主动提及。)" except (ValueError, AttributeError): schedule_block = 
f"你当前正在进行“{activity}”。(此为你的当前状态,仅供参考。除非被直接询问,否则不要在对话中主动提及。)" diff --git a/src/llm_models/model_client/openai_client.py b/src/llm_models/model_client/openai_client.py index 3b6055b45..efa1785d9 100644 --- a/src/llm_models/model_client/openai_client.py +++ b/src/llm_models/model_client/openai_client.py @@ -350,10 +350,10 @@ def _default_normal_response_parser( api_response.tool_calls = [] for call in message_part.tool_calls: try: - arguments = orjson.loads(repair_json(call.function.arguments)) + arguments = orjson.loads(repair_json(call.function.arguments)) # type: ignore if not isinstance(arguments, dict): raise RespParseException(resp, "响应解析失败,工具调用参数无法解析为字典类型") - api_response.tool_calls.append(ToolCall(call.id, call.function.name, arguments)) + api_response.tool_calls.append(ToolCall(call.id, call.function.name, arguments)) # type: ignore except orjson.JSONDecodeError as e: raise RespParseException(resp, "响应解析失败,无法解析工具调用参数") from e