better: add new config items
@@ -19,7 +19,7 @@ logger = get_module_logger("tool_use", config=tool_use_config)
 class ToolUser:
     def __init__(self):
         self.llm_model_tool = LLM_request(
-            model=global_config.llm_heartflow, temperature=0.2, max_tokens=1000, request_type="tool_use"
+            model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
         )

     async def _build_tool_prompt(self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None):
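Note: ToolUser now reads its model from a dedicated llm_tool_use entry instead of reusing llm_heartflow. A minimal sketch of how that entry is consumed, assuming the dict layout produced by the config loader and the values from the [model.llm_tool_use] section added later in this commit (illustrative only, not the exact runtime object):

# Hedged sketch: the parsed [model.llm_tool_use] section becomes a plain dict on global_config.
llm_tool_use = {
    "name": "Qwen/Qwen2.5-32B-Instruct",
    "base_url": "",   # resolved from provider env vars, e.g. SILICONFLOW_BASE_URL
    "key": "",        # e.g. SILICONFLOW_KEY
    "stream": False,
    "pri_in": 1.26,
    "pri_out": 1.26,
}
tool_llm = LLM_request(model=llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use")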
@@ -29,17 +29,10 @@ class ChattingObservation(Observation):
         self.name = global_config.BOT_NICKNAME
         self.nick_name = global_config.BOT_ALIAS_NAMES

-        self.observe_times = 0
-
-        self.summary_count = 0 # 30秒内的更新次数
-        self.max_update_in_30s = 2 # 30秒内最多更新2次
-        self.last_summary_time = 0 # 上次更新summary的时间
-
-        self.sub_observe = None
-        self.max_now_obs_len = 20
-        self.overlap_len = 5
+        self.max_now_obs_len = global_config.observation_context_size
+        self.overlap_len = global_config.compressed_length
         self.mid_memorys = []
-        self.max_mid_memory_len = 5
+        self.max_mid_memory_len = global_config.compress_length_limit
         self.mid_memory_info = ""
         self.now_message_info = ""

@@ -79,7 +79,7 @@ class SubHeartflow:
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
-            model=global_config.llm_sub_heartflow, temperature=0.2, max_tokens=600, request_type="sub_heart_flow"
+            model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"], max_tokens=600, request_type="sub_heart_flow"
         )

         self.main_heartflow_info = ""
@@ -24,7 +24,7 @@ class ActionPlanner:

     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=0.2, max_tokens=1000, request_type="action_planning"
+            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="action_planning"
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
@@ -13,7 +13,7 @@ class KnowledgeFetcher:

     def __init__(self):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="knowledge_fetch"
+            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="knowledge_fetch"
         )

     async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
@@ -16,7 +16,7 @@ class ReplyGenerator:

     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=0.2, max_tokens=300, request_type="reply_generation"
+            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=300, request_type="reply_generation"
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
@@ -29,7 +29,7 @@ class ResponseGenerator:
             request_type="response_reasoning",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_reasoning"
+            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_reasoning"
         )

         self.model_sum = LLM_request(
@@ -26,7 +26,7 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.15, max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_heartflow"
         )

         self.model_sum = LLM_request(
@@ -49,7 +49,7 @@ class ResponseGenerator:
         if random.random() > 0:
             checked = False
             current_model = self.model_normal
-            current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
+            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier # 激活度越高,温度越高
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="normal"
             )
@@ -58,13 +58,13 @@ class ResponseGenerator:
         else:
             checked = True
             current_model = self.model_normal
-            current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
+            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier # 激活度越高,温度越高
             print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="simple"
             )

-        current_model.temperature = 0.3
+        current_model.temperature = global_config.llm_normal["temp"]
         model_checked_response = await self._check_response_with_model(
             message, model_response, current_model, thinking_id
         )
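Both branches above, plus the check pass after them, now derive the reply temperature from the configured temp value instead of the previous hard-coded 0.3. A small worked example; the arousal value is assumed, since this diff does not show how arousal_multiplier is computed:

# Illustrative numbers only; temp comes from [model.llm_normal], arousal_multiplier is assumed.
temp = 0.2                 # global_config.llm_normal["temp"]
arousal_multiplier = 1.5   # assumed: higher arousal -> higher temperature
reply_temperature = temp * arousal_multiplier   # 0.3 for the generation pass
check_temperature = temp                        # reset to 0.2 for the check pass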
@@ -26,9 +26,9 @@ config_config = LogConfig(
 logger = get_module_logger("config", config=config_config)

 # 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
-is_test = True
+is_test = False
 mai_version_main = "0.6.2"
-mai_version_fix = "snapshot-2"
+mai_version_fix = ""

 if mai_version_fix:
     if is_test:
@@ -196,6 +196,9 @@ class BotConfig:
     sub_heart_flow_freeze_time: int = 120 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
     sub_heart_flow_stop_time: int = 600 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
     heart_flow_update_interval: int = 300 # 心流更新频率,间隔 单位秒
+    observation_context_size: int = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+    compressed_length: int = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
+    compress_length_limit: int = 5 #最多压缩份数,超过该数值的压缩上下文会被删除

     # willing
     willing_mode: str = "classical" # 意愿模式
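These three fields drive the context compression used by ChattingObservation (see the hunk near the top of this commit). A simplified sketch of the documented semantics, inferred from the field comments rather than taken from the actual compression code:

# Simplified sketch; names mirror the new config fields, the logic is an approximation.
observation_context_size = 20   # at most this many messages are kept uncompressed
compressed_length = 5           # the oldest messages are folded into one summary in chunks of this size
compress_length_limit = 5       # at most this many summaries (mid_memorys) are retained

messages = [f"msg{i}" for i in range(23)]   # 23 observed messages
mid_memorys = []
if len(messages) > observation_context_size:
    chunk, messages = messages[:compressed_length], messages[compressed_length:]
    mid_memorys.append("summary of " + ", ".join(chunk))
    mid_memorys = mid_memorys[-compress_length_limit:]  # older summaries are dropped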
@@ -440,6 +443,16 @@ class BotConfig:
            config.heart_flow_update_interval = heartflow_config.get(
                "heart_flow_update_interval", config.heart_flow_update_interval
            )
+            if config.INNER_VERSION in SpecifierSet(">=1.3.0"):
+                config.observation_context_size = heartflow_config.get(
+                    "observation_context_size", config.observation_context_size
+                )
+                config.compressed_length = heartflow_config.get(
+                    "compressed_length", config.compressed_length
+                )
+                config.compress_length_limit = heartflow_config.get(
+                    "compress_length_limit", config.compress_length_limit
+                )

        def willing(parent: dict):
            willing_config = parent["willing"]
@@ -477,7 +490,7 @@ class BotConfig:
                "llm_emotion_judge",
                "vlm",
                "embedding",
-                "moderation",
+                "llm_tool_use",
                "llm_observation",
                "llm_sub_heartflow",
                "llm_heartflow",
@@ -489,7 +502,7 @@ class BotConfig:

            # base_url 的例子: SILICONFLOW_BASE_URL
            # key 的例子: SILICONFLOW_KEY
-            cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0}
+            cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0, "temp": 0.7}

            if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
                cfg_target = cfg_item
@@ -502,6 +515,7 @@ class BotConfig:
                stable_item.append("stream")
+
            pricing_item = ["pri_in", "pri_out"]

            # 从配置中原始拷贝稳定字段
            for i in stable_item:
                # 如果 字段 属于计费项 且获取不到,那默认值是 0
@@ -519,6 +533,13 @@ class BotConfig:
                    logger.error(f"{item} 中的必要字段不存在,请检查")
                    raise KeyError(f"{item} 中的必要字段 {e} 不存在,请检查") from e

+            # 如果配置中有temp参数,就使用配置中的值
+            if "temp" in cfg_item:
+                cfg_target["temp"] = cfg_item["temp"]
+            else:
+                # 如果没有temp参数,就删除默认值
+                cfg_target.pop("temp", None)
+
            provider = cfg_item.get("provider")
            if provider is None:
                logger.error(f"provider 字段在模型配置 {item} 中不存在,请检查")
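Net effect of the temp handling above: "temp" defaults to 0.7 in cfg_target, is overwritten when the [model.*] section provides one, and is removed entirely when the section does not. A condensed sketch with the surrounding parsing loop omitted; cfg_item stands for one parsed model table:

# Condensed from the hunk above; values for cfg_item are illustrative.
cfg_target = {"name": "", "base_url": "", "key": "", "stream": False,
              "pri_in": 0, "pri_out": 0, "temp": 0.7}
cfg_item = {"name": "Pro/deepseek-ai/DeepSeek-V3", "provider": "SILICONFLOW", "temp": 0.2}

if "temp" in cfg_item:
    cfg_target["temp"] = cfg_item["temp"]   # section value wins -> 0.2
else:
    cfg_target.pop("temp", None)            # no default kept; code reading model["temp"] must expect a missing key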
@@ -1,5 +1,5 @@
 [inner]
-version = "1.2.7"
+version = "1.3.0"


 #以下是给开发人员阅读的,一般用户不需要阅读
@@ -77,7 +77,11 @@ model_v3_probability = 0.3 # 麦麦回答时选择次要回复模型2 模型的
 sub_heart_flow_update_interval = 60 # 子心流更新频率,间隔 单位秒
 sub_heart_flow_freeze_time = 100 # 子心流冻结时间,超过这个时间没有回复,子心流会冻结,间隔 单位秒
 sub_heart_flow_stop_time = 500 # 子心流停止时间,超过这个时间没有回复,子心流会停止,间隔 单位秒
-heart_flow_update_interval = 300 # 心流更新频率,间隔 单位秒
+heart_flow_update_interval = 600 # 心流更新频率,间隔 单位秒
+
+observation_context_size = 20 # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
+compressed_length = 5 # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
+compress_length_limit = 5 #最多压缩份数,超过该数值的压缩上下文会被删除


 [message]
@@ -189,11 +193,12 @@ pri_out = 16 #模型的输出价格(非必填,可以记录消耗)

 #非推理模型

-[model.llm_normal] #V3 回复模型1 主要回复模型,默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改代码中的temp参数
+[model.llm_normal] #V3 回复模型1 主要回复模型,默认temp 0.2 如果你使用的是老V3或者其他模型,请自己修改temp参数
 name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
 pri_in = 2 #模型的输入价格(非必填,可以记录消耗)
 pri_out = 8 #模型的输出价格(非必填,可以记录消耗)
+temp = 0.2 #模型的温度,新V3建议0.1-0.3

 [model.llm_emotion_judge] #表情包判断
 name = "Qwen/Qwen2.5-14B-Instruct"
@@ -213,11 +218,11 @@ provider = "SILICONFLOW"
 pri_in = 1.26
 pri_out = 1.26

-[model.moderation] #内容审核,开发中
-name = ""
+[model.llm_tool_use] #工具调用模型,需要使用支持工具调用的模型,建议使用qwen2.5 32b
+name = "Qwen/Qwen2.5-32B-Instruct"
 provider = "SILICONFLOW"
-pri_in = 1.0
-pri_out = 2.0
+pri_in = 1.26
+pri_out = 1.26

 # 识图模型

@@ -247,6 +252,7 @@ name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
 pri_in = 2
 pri_out = 8
+temp = 0.2 #模型的温度,新V3建议0.1-0.3

 [model.llm_heartflow] #心流:建议使用qwen2.5 32b
 # name = "Pro/Qwen/Qwen2.5-7B-Instruct"