better: split the model configuration further

SengokuCola
2025-05-27 18:35:33 +08:00
parent 52f7cc3762
commit cad9b40bb3
10 changed files with 61 additions and 49 deletions

View File

@@ -374,7 +374,7 @@ class EmojiManager:
         self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
         self.llm_emotion_judge = LLMRequest(
-            model=global_config.model.normal, max_tokens=600, request_type="emoji"
+            model=global_config.model.utils, max_tokens=600, request_type="emoji"
         )  # Higher temperature and fewer tokens; the temperature could later be adjusted based on mood
         self.emoji_num = 0

View File

@@ -192,9 +192,9 @@ class DefaultExpressor:
"""
try:
# 1. 获取情绪影响因子并调整模型温度
arousal_multiplier = mood_manager.get_arousal_multiplier()
current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
self.express_model.params["temperature"] = current_temp # 动态调整温度
# arousal_multiplier = mood_manager.get_arousal_multiplier()
# current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
# self.express_model.params["temperature"] = current_temp # 动态调整温度
# 2. 获取信息捕捉器
info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

View File

@@ -28,7 +28,7 @@ class ChattingInfoProcessor(BaseProcessor):
         super().__init__()
         # TODO: API-Adapter change marker
         self.model_summary = LLMRequest(
-            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+            model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )

     async def process_info(

View File

@@ -20,13 +20,13 @@ class NormalChatGenerator:
             model=global_config.model.normal_chat_1,
             temperature=0.7,
             max_tokens=3000,
-            request_type="response_reasoning",
+            request_type="normal_chat_1",
         )
         self.model_normal = LLMRequest(
-            model=global_config.model.normal,
-            temperature=global_config.model.normal["temp"],
+            model=global_config.model.normal_chat_2,
+            temperature=global_config.model.normal_chat_2["temp"],
             max_tokens=256,
-            request_type="response_reasoning",
+            request_type="normal_chat_2",
         )
         self.model_sum = LLMRequest(
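
Note on the hunk above: temperature=global_config.model.normal_chat_2["temp"] assumes every bucket defines a "temp" key, but ModelConfig (later in this commit) defaults each bucket to an empty dict, so a missing key would raise KeyError at construction time. A minimal hedged sketch of a safer lookup; the helper name is hypothetical and not part of this commit:

# Hypothetical helper, not in this repo: fall back to a default temperature
# when a model bucket omits the "temp" key.
def bucket_temp(bucket: dict, default: float = 0.7) -> float:
    return float(bucket.get("temp", default))

# Usage with the new bucket:
# temperature=bucket_temp(global_config.model.normal_chat_2)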

View File

@@ -377,12 +377,19 @@ class ModelConfig(ConfigBase):
"""模型配置类"""
model_max_output_length: int = 800 # 最大回复长度
utils: dict[str, Any] = field(default_factory=lambda: {})
"""组件模型配置"""
utils_small: dict[str, Any] = field(default_factory=lambda: {})
"""组件小模型配置"""
reasoning: dict[str, Any] = field(default_factory=lambda: {})
"""推理模型配置"""
normal: dict[str, Any] = field(default_factory=lambda: {})
"""普通模型配置"""
normal_chat_1: dict[str, Any] = field(default_factory=lambda: {})
"""normal_chat首要回复模型模型配置"""
normal_chat_2: dict[str, Any] = field(default_factory=lambda: {})
"""normal_chat次要回复模型配置"""
memory_summary: dict[str, Any] = field(default_factory=lambda: {})
"""记忆的概括模型配置"""
@@ -390,9 +397,6 @@ class ModelConfig(ConfigBase):
vlm: dict[str, Any] = field(default_factory=lambda: {})
"""视觉语言模型配置"""
observation: dict[str, Any] = field(default_factory=lambda: {})
"""观察模型配置"""
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
"""专注工作记忆模型配置"""

View File

@@ -44,7 +44,7 @@ class GoalAnalyzer:
     def __init__(self, stream_id: str, private_name: str):
         # TODO: API-Adapter change marker
         self.llm = LLMRequest(
-            model=global_config.model.normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
+            model=global_config.model.utils, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
         )
         self.personality_info = individuality.get_prompt(x_person=2, level=3)

View File

@@ -16,8 +16,8 @@ class KnowledgeFetcher:
     def __init__(self, private_name: str):
         # TODO: API-Adapter change marker
         self.llm = LLMRequest(
-            model=global_config.model.normal,
-            temperature=global_config.model.normal["temp"],
+            model=global_config.model.utils,
+            temperature=global_config.model.utils["temp"],
             max_tokens=1000,
             request_type="knowledge_fetch",
         )

View File

@@ -497,8 +497,8 @@ class LLMRequest:
logger.warning(f"检测到403错误模型从 {old_model_name} 降级为 {self.model_name}")
# 对全局配置进行更新
if global_config.model.normal.get("name") == old_model_name:
global_config.model.normal["name"] = self.model_name
if global_config.model.normal_chat_2.get("name") == old_model_name:
global_config.model.normal_chat_2["name"] = self.model_name
logger.warning(f"将全局配置中的 llm_normal 模型临时降级至{self.model_name}")
if global_config.model.normal_chat_1.get("name") == old_model_name:
global_config.model.normal_chat_1["name"] = self.model_name
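
The 403 fallback above now updates each chat bucket by hand, and the surviving warning still says llm_normal even though the key it rewrites is now normal_chat_2. A minimal sketch of the same pattern factored into a loop; the function is hypothetical and not part of this commit:

# Hypothetical refactor of the hunk above: rewrite every bucket that still
# references the downgraded model name.
def sync_downgraded_model(model_cfg, old_name: str, new_name: str) -> None:
    for bucket_name in ("normal_chat_1", "normal_chat_2"):
        bucket = getattr(model_cfg, bucket_name)
        if bucket.get("name") == old_name:
            bucket["name"] = new_name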

View File

@@ -58,7 +58,7 @@ class PersonInfoManager:
         self.person_name_list = {}
         # TODO: API-Adapter change marker
         self.qv_name_llm = LLMRequest(
-            model=global_config.model.normal,
+            model=global_config.model.utils,
             max_tokens=256,
             request_type="qv_name",
         )