better: add config items

SengokuCola
2025-04-14 22:09:00 +08:00
parent 53332791e4
commit d9f191705f
11 changed files with 51 additions and 31 deletions

View File

@@ -24,7 +24,7 @@ class ActionPlanner:
def __init__(self, stream_id: str):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.2, max_tokens=1000, request_type="action_planning"
+ model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="action_planning"
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME
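
The same substitution repeats in each file below: a hardcoded sampling temperature is replaced by the per-model temp value carried in the model's config dict. A minimal sketch of the lookup, with a hypothetical stand-in for global_config.llm_normal:

llm_normal = {"name": "deepseek-chat", "temp": 0.7}  # hypothetical parsed model config

temperature = llm_normal["temp"]  # 0.7 when temp is configured
# note: if the parser dropped the temp key (see the else branch in the config
# parsing at the end of this commit), this subscript raises KeyError;
# llm_normal.get("temp", 0.2) would preserve the old per-call default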

View File

@@ -13,7 +13,7 @@ class KnowledgeFetcher:
def __init__(self):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="knowledge_fetch"
+ model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="knowledge_fetch"
)
async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:

View File

@@ -16,7 +16,7 @@ class ReplyGenerator:
def __init__(self, stream_id: str):
self.llm = LLM_request(
- model=global_config.llm_normal, temperature=0.2, max_tokens=300, request_type="reply_generation"
+ model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=300, request_type="reply_generation"
)
self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
self.name = global_config.BOT_NICKNAME

View File

@@ -29,7 +29,7 @@ class ResponseGenerator:
request_type="response_reasoning",
)
self.model_normal = LLM_request(
- model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_reasoning"
+ model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_reasoning"
)
self.model_sum = LLM_request(

View File

@@ -26,7 +26,7 @@ logger = get_module_logger("llm_generator", config=llm_config)
class ResponseGenerator:
def __init__(self):
self.model_normal = LLM_request(
- model=global_config.llm_normal, temperature=0.15, max_tokens=256, request_type="response_heartflow"
+ model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_heartflow"
)
self.model_sum = LLM_request(
@@ -49,7 +49,7 @@ class ResponseGenerator:
if random.random() > 0:
checked = False
current_model = self.model_normal
- current_model.temperature = 0.3 * arousal_multiplier  # the higher the arousal, the higher the temperature
+ current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="normal"
)
@@ -58,13 +58,13 @@ class ResponseGenerator:
else:
checked = True
current_model = self.model_normal
- current_model.temperature = 0.3 * arousal_multiplier  # the higher the arousal, the higher the temperature
+ current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
print(f"temperature for the reply to {message.processed_plain_text} is: {current_model.temperature}")
model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="simple"
)
- current_model.temperature = 0.3
+ current_model.temperature = global_config.llm_normal["temp"]
model_checked_response = await self._check_response_with_model(
message, model_response, current_model, thinking_id
)
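
In both branches the configured base temperature is scaled by arousal_multiplier, so a more aroused state samples more randomly. A toy sketch with hypothetical numbers:

base_temp = 0.3  # stands in for global_config.llm_normal["temp"]
for arousal_multiplier in (0.5, 1.0, 1.5):  # hypothetical arousal range
    print(f"temperature = {base_temp * arousal_multiplier:.2f}")
# -> temperature = 0.15 / 0.30 / 0.45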

View File

@@ -26,9 +26,9 @@ config_config = LogConfig(
logger = get_module_logger("config", config=config_config)
# Since mai_version in the config file is not updated automatically, it is hard-coded here
- is_test = True
+ is_test = False
mai_version_main = "0.6.2"
- mai_version_fix = "snapshot-2"
+ mai_version_fix = ""
if mai_version_fix:
if is_test:
@@ -196,6 +196,9 @@ class BotConfig:
sub_heart_flow_freeze_time: int = 120  # sub-heartflow freeze time: after this many seconds without a reply, the sub-heartflow freezes
sub_heart_flow_stop_time: int = 600  # sub-heartflow stop time: after this many seconds without a reply, the sub-heartflow stops
heart_flow_update_interval: int = 300  # heartflow update interval, in seconds
+ observation_context_size: int = 20  # maximum context size the heartflow observes; context beyond this is compressed
+ compressed_length: int = 5  # must not exceed observation_context_size; the minimum length a compressed context chunk is reduced to (default 5)
+ compress_length_limit: int = 5  # maximum number of compressed chunks kept; compressed context beyond this is deleted
# willing
willing_mode: str = "classical"  # willingness mode
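
The three new limits describe a sliding-window policy: keep the newest messages verbatim, squash overflow into short compressed chunks, and cap how many chunks survive. A toy, self-contained sketch of that policy (the summarize step is a hypothetical stand-in; the real compression lives in the heartflow observer code, which this commit does not show):

from collections import deque

observation_context_size = 20  # newest messages kept verbatim
compressed_length = 5          # each compressed chunk keeps at most this many messages
compress_length_limit = 5      # at most this many compressed chunks are retained

context: list[str] = []
compressed: deque = deque(maxlen=compress_length_limit)  # oldest chunks drop off automatically

def observe(msg: str) -> None:
    context.append(msg)
    if len(context) > observation_context_size:
        overflow = context[:-observation_context_size]
        del context[:-observation_context_size]
        compressed.append(" | ".join(overflow[:compressed_length]))  # stand-in for an LLM summary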
@@ -440,6 +443,16 @@ class BotConfig:
config.heart_flow_update_interval = heartflow_config.get(
"heart_flow_update_interval", config.heart_flow_update_interval
)
+ if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
+     config.observation_context_size = heartflow_config.get(
+         "observation_context_size", config.observation_context_size
+     )
+     config.compressed_length = heartflow_config.get(
+         "compressed_length", config.compressed_length
+     )
+     config.compress_length_limit = heartflow_config.get(
+         "compress_length_limit", config.compress_length_limit
+     )
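
The new keys are only read when the config schema is new enough. The gate uses the packaging library's SpecifierSet containment check; a minimal sketch with hypothetical versions:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

inner_version = Version("1.3.0")  # hypothetical parsed inner version from the config
print(inner_version in SpecifierSet(">=1.3.0"))    # True: the three heartflow keys get read
print(Version("1.2.9") in SpecifierSet(">=1.3.0"))  # False: the defaults stay in place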
def willing(parent: dict):
willing_config = parent["willing"]
@@ -477,7 +490,7 @@ class BotConfig:
"llm_emotion_judge",
"vlm",
"embedding",
"moderation",
"llm_tool_use",
"llm_observation",
"llm_sub_heartflow",
"llm_heartflow",
@@ -489,7 +502,7 @@ class BotConfig:
# example of a base_url env var: SILICONFLOW_BASE_URL
# example of a key env var: SILICONFLOW_KEY
- cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0}
+ cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0, "temp": 0.7}
if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
cfg_target = cfg_item
@@ -502,6 +515,7 @@ class BotConfig:
stable_item.append("stream")
pricing_item = ["pri_in", "pri_out"]
# copy the stable fields verbatim from the config
for i in stable_item:
# if the field is a pricing item and missing, it defaults to 0
@@ -519,6 +533,13 @@ class BotConfig:
logger.error(f"{item} 中的必要字段不存在,请检查")
raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e
+ # if the config provides a temp value, use it
+ if "temp" in cfg_item:
+     cfg_target["temp"] = cfg_item["temp"]
+ else:
+     # if no temp is given, drop the default value
+     cfg_target.pop("temp", None)
provider = cfg_item.get("provider")
if provider is None:
logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")