better:优化回复逻辑,现在回复前会先思考,移除推理模型在回复中的使用,优化心流运行逻辑,优化思考时间计算逻辑,添加错误检测

This commit is contained in:
SengokuCola
2025-03-31 22:34:52 +08:00
parent 42b1b772ef
commit 4c42c90879
14 changed files with 254 additions and 193 deletions

View File

@@ -231,7 +231,7 @@ class BotConfig:
# 模型配置
llm_reasoning: Dict[str, str] = field(default_factory=lambda: {})
llm_reasoning_minor: Dict[str, str] = field(default_factory=lambda: {})
# llm_reasoning_minor: Dict[str, str] = field(default_factory=lambda: {})
llm_normal: Dict[str, str] = field(default_factory=lambda: {})
llm_topic_judge: Dict[str, str] = field(default_factory=lambda: {})
llm_summary_by_topic: Dict[str, str] = field(default_factory=lambda: {})
@@ -370,9 +370,9 @@ class BotConfig:
response_config = parent["response"]
config.MODEL_R1_PROBABILITY = response_config.get("model_r1_probability", config.MODEL_R1_PROBABILITY)
config.MODEL_V3_PROBABILITY = response_config.get("model_v3_probability", config.MODEL_V3_PROBABILITY)
config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
"model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
)
# config.MODEL_R1_DISTILL_PROBABILITY = response_config.get(
# "model_r1_distill_probability", config.MODEL_R1_DISTILL_PROBABILITY
# )
config.max_response_length = response_config.get("max_response_length", config.max_response_length)
def willing(parent: dict):
@@ -397,7 +397,7 @@ class BotConfig:
config_list = [
"llm_reasoning",
"llm_reasoning_minor",
# "llm_reasoning_minor",
"llm_normal",
"llm_topic_judge",
"llm_summary_by_topic",