From c4b09177fad08a4cb04b815387640c2aae796c23 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Sun, 9 Mar 2025 22:56:13 +0800
Subject: [PATCH 1/2] A small memory-visualization script
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 run_memory_vis.bat | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 run_memory_vis.bat

diff --git a/run_memory_vis.bat b/run_memory_vis.bat
new file mode 100644
index 000000000..14b9c766f
--- /dev/null
+++ b/run_memory_vis.bat
@@ -0,0 +1,18 @@
+@echo on
+chcp 65001 > nul
+set /p CONDA_ENV="Enter the name of the conda environment to activate: "
+call conda activate %CONDA_ENV%
+if errorlevel 1 (
+    echo Failed to activate conda environment
+    pause
+    exit /b 1
+)
+echo Conda environment "%CONDA_ENV%" activated successfully
+python src/plugins/memory_system/memory_manual_build.py
+if errorlevel 1 (
+    echo Command failed with error code %errorlevel%
+    pause
+    exit /b 1
+)
+echo Script completed successfully
+pause
\ No newline at end of file

From 089d6a6c796ffa4987476c6fc5624e795ce70eeb Mon Sep 17 00:00:00 2001
From: HYY
Date: Sun, 9 Mar 2025 23:00:09 +0800
Subject: [PATCH 2/2] feat: add automatic downgrade for SiliconFlow Pro models
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/models/utils_model.py | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py
index e890b4c80..bd06fd6dd 100644
--- a/src/plugins/models/utils_model.py
+++ b/src/plugins/models/utils_model.py
@@ -182,13 +182,26 @@ class LLM_request:
                 continue
             elif response.status in policy["abort_codes"]:
                 logger.error(f"Error code: {response.status} - {error_code_mapping.get(response.status)}")
-                if response.status == 403 :
-                    if global_config.llm_normal == "Pro/deepseek-ai/DeepSeek-V3":
-                        logger.error("SiliconFlow balance may be insufficient; the normal model falls back to the non-Pro model and may respond more slowly")
-                        global_config.llm_normal = "deepseek-ai/DeepSeek-V3"
-                    if global_config.llm_reasoning == "Pro/deepseek-ai/DeepSeek-R1":
-                        logger.error("SiliconFlow balance may be insufficient; the reasoning model falls back to the non-Pro model and may respond more slowly")
-                        global_config.llm_reasoning = "deepseek-ai/DeepSeek-R1"
+                if response.status == 403:
+                    # Try to downgrade the Pro model
+                    if self.model_name.startswith("Pro/") and self.base_url == "https://api.siliconflow.cn/v1/":
+                        old_model_name = self.model_name
+                        self.model_name = self.model_name[4:]  # strip the "Pro/" prefix
+                        logger.warning(f"Got a 403 error; model downgraded from {old_model_name} to {self.model_name}")
+
+                        # Update the global config as well
+                        if hasattr(global_config, 'llm_normal') and global_config.llm_normal.get('name') == old_model_name:
+                            global_config.llm_normal['name'] = self.model_name
+                            logger.warning("Downgraded the llm_normal model in the global config")
+
+                        # Update the model name in the payload
+                        if payload and 'model' in payload:
+                            payload['model'] = self.model_name
+
+                        # Retry the request
+                        retry -= 1  # do not count this attempt toward the retry limit
+                        continue
+                    raise RuntimeError(f"Request rejected: {error_code_mapping.get(response.status)}")
                 response.raise_for_status()
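
For reference, the retry-and-downgrade flow that PATCH 2/2 adds can be reduced to the following self-contained sketch. This is an illustration only, not part of the patch: request_once() and its fake responses are hypothetical stand-ins for the real HTTP machinery inside LLM_request in src/plugins/models/utils_model.py.

# Self-contained sketch of the 403 downgrade-and-retry pattern from PATCH 2/2.
# request_once() is a hypothetical stand-in for the real HTTP call; it fakes
# a 403 for any "Pro/" model so the downgrade path can be exercised locally.

SILICONFLOW_BASE_URL = "https://api.siliconflow.cn/v1/"


def request_once(payload: dict, base_url: str) -> tuple[int, str]:
    """Fake HTTP call: pretend any Pro/ model is rejected (e.g. no balance)."""
    if payload["model"].startswith("Pro/"):
        return 403, "insufficient balance"
    return 200, f"ok from {payload['model']}"


def request_with_downgrade(payload: dict, base_url: str, max_retries: int = 3) -> str:
    retries = 0
    while retries < max_retries:
        status, body = request_once(payload, base_url)
        if status == 200:
            return body
        if status == 403:
            # The downgrade only applies to Pro models on SiliconFlow.
            if base_url == SILICONFLOW_BASE_URL and payload["model"].startswith("Pro/"):
                old_model = payload["model"]
                payload["model"] = old_model[len("Pro/"):]  # strip the prefix
                print(f"403: downgrading {old_model} -> {payload['model']}")
                continue  # the downgrade retry does not consume a retry slot
            raise RuntimeError(f"request rejected: {body}")
        retries += 1
    raise RuntimeError("request failed after retries")


if __name__ == "__main__":
    result = request_with_downgrade(
        {"model": "Pro/deepseek-ai/DeepSeek-V3"}, SILICONFLOW_BASE_URL
    )
    print(result)  # -> ok from deepseek-ai/DeepSeek-V3

Run with a Pro/ model name against the SiliconFlow base URL, the sketch logs the downgrade and returns the non-Pro response; any other 403 raises immediately, mirroring the patch's RuntimeError path.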