better: split the model configuration further
@@ -374,7 +374,7 @@ class EmojiManager:
 
         self.vlm = LLMRequest(model=global_config.model.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
         self.llm_emotion_judge = LLMRequest(
-            model=global_config.model.normal, max_tokens=600, request_type="emoji"
+            model=global_config.model.utils, max_tokens=600, request_type="emoji"
         )  # higher temperature, fewer tokens (temperature can later be adjusted based on mood)
 
         self.emoji_num = 0
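The emotion-judge request now points at a dedicated utils entry instead of sharing model.normal. The diff does not show how the split model section is actually defined; purely as an illustration, assuming each entry is a dict-like object with at least a model name and a "temp" value (all names and defaults below are hypothetical), it could look roughly like this:

from dataclasses import dataclass, field

# Hypothetical sketch of the split model configuration; the real project may
# load these entries from a config file instead of hard-coding defaults.
@dataclass
class ModelSection:
    vlm: dict = field(default_factory=lambda: {"name": "vision-model", "temp": 0.3})
    utils: dict = field(default_factory=lambda: {"name": "utility-model", "temp": 0.7})
    utils_small: dict = field(default_factory=lambda: {"name": "small-utility-model", "temp": 0.7})
    normal_chat_1: dict = field(default_factory=lambda: {"name": "chat-model-1", "temp": 0.7})
    normal_chat_2: dict = field(default_factory=lambda: {"name": "chat-model-2", "temp": 0.7})

# Access pattern matching the diff below: global_config.model.normal_chat_2["temp"]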
@@ -192,9 +192,9 @@ class DefaultExpressor:
         """
         try:
             # 1. Get the mood influence factor and adjust the model temperature
-            arousal_multiplier = mood_manager.get_arousal_multiplier()
-            current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
-            self.express_model.params["temperature"] = current_temp  # dynamically adjust the temperature
+            # arousal_multiplier = mood_manager.get_arousal_multiplier()
+            # current_temp = float(global_config.model.normal["temp"]) * arousal_multiplier
+            # self.express_model.params["temperature"] = current_temp  # dynamically adjust the temperature
 
             # 2. Get the info catcher
             info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
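This hunk disables the mood-driven temperature adjustment rather than porting it to a new config key, so express_model keeps whatever temperature it was constructed with. For reference, the disabled logic scales a configured base temperature by an arousal factor; a minimal self-contained sketch of that calculation (the function name and example values are illustrative, not part of the project):

def adjusted_temperature(model_entry: dict, arousal_multiplier: float) -> float:
    # Scale the configured base temperature by the mood arousal factor,
    # mirroring the logic this commit comments out.
    return float(model_entry["temp"]) * arousal_multiplier

# Example: an arousal factor of 1.2 on a 0.7 base temperature gives 0.84.
print(adjusted_temperature({"temp": 0.7}, 1.2))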
@@ -28,7 +28,7 @@ class ChattingInfoProcessor(BaseProcessor):
         super().__init__()
         # TODO: API-Adapter change marker
         self.model_summary = LLMRequest(
-            model=global_config.model.observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
+            model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )
 
     async def process_info(
@@ -20,13 +20,13 @@ class NormalChatGenerator:
             model=global_config.model.normal_chat_1,
             temperature=0.7,
             max_tokens=3000,
-            request_type="response_reasoning",
+            request_type="normal_chat_1",
         )
         self.model_normal = LLMRequest(
-            model=global_config.model.normal,
-            temperature=global_config.model.normal["temp"],
+            model=global_config.model.normal_chat_2,
+            temperature=global_config.model.normal_chat_2["temp"],
             max_tokens=256,
-            request_type="response_reasoning",
+            request_type="normal_chat_2",
         )
 
         self.model_sum = LLMRequest(
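Across the whole commit, each call site moves from a shared config entry to a task-specific one, and the two NormalChatGenerator requests also get distinct request_type labels (normal_chat_1, normal_chat_2) instead of a shared response_reasoning. The remapping, restated as plain data for quick reference (the dict itself is only illustrative; the entry names come from the diff):

# Old config entry -> new config entry for each call site touched by this commit.
MODEL_ENTRY_REMAP = {
    "EmojiManager.llm_emotion_judge": ("model.normal", "model.utils"),
    "ChattingInfoProcessor.model_summary": ("model.observation", "model.utils_small"),
    "NormalChatGenerator.model_normal": ("model.normal", "model.normal_chat_2"),
}

for call_site, (old_entry, new_entry) in MODEL_ENTRY_REMAP.items():
    print(f"{call_site}: {old_entry} -> {new_entry}")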