From cad5011c24ccc2a76df2cc1319e7d3968c249f9c Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Mon, 14 Apr 2025 14:28:14 +0000
Subject: [PATCH] =?UTF-8?q?=F0=9F=A4=96=20=E8=87=AA=E5=8A=A8=E6=A0=BC?=
 =?UTF-8?q?=E5=BC=8F=E5=8C=96=E4=BB=A3=E7=A0=81=20[skip=20ci]?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/heart_flow/sub_heartflow.py              |  5 ++++-
 src/plugins/PFC/action_planner.py            |  5 ++++-
 src/plugins/PFC/pfc_KnowledgeFetcher.py      |  5 ++++-
 src/plugins/PFC/reply_generator.py           |  5 ++++-
 .../reasoning_chat/reasoning_generator.py    |  5 ++++-
 .../think_flow_chat/think_flow_generator.py  | 13 ++++++++++---
 src/plugins/config/config.py                 | 18 ++++++++++++------
 7 files changed, 42 insertions(+), 14 deletions(-)

diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index 0f0aa85a9..b9da0f7ee 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -79,7 +79,10 @@ class SubHeartflow:
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
-            model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"], max_tokens=600, request_type="sub_heart_flow"
+            model=global_config.llm_sub_heartflow,
+            temperature=global_config.llm_sub_heartflow["temp"],
+            max_tokens=600,
+            request_type="sub_heart_flow",
         )
 
         self.main_heartflow_info = ""
diff --git a/src/plugins/PFC/action_planner.py b/src/plugins/PFC/action_planner.py
index e922d0587..cc904662d 100644
--- a/src/plugins/PFC/action_planner.py
+++ b/src/plugins/PFC/action_planner.py
@@ -24,7 +24,10 @@ class ActionPlanner:
 
     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="action_planning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="action_planning",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
diff --git a/src/plugins/PFC/pfc_KnowledgeFetcher.py b/src/plugins/PFC/pfc_KnowledgeFetcher.py
index 88701304a..9c5c55076 100644
--- a/src/plugins/PFC/pfc_KnowledgeFetcher.py
+++ b/src/plugins/PFC/pfc_KnowledgeFetcher.py
@@ -13,7 +13,10 @@ class KnowledgeFetcher:
 
     def __init__(self):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="knowledge_fetch"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="knowledge_fetch",
         )
 
     async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
diff --git a/src/plugins/PFC/reply_generator.py b/src/plugins/PFC/reply_generator.py
index a81cfa10c..85a067d23 100644
--- a/src/plugins/PFC/reply_generator.py
+++ b/src/plugins/PFC/reply_generator.py
@@ -16,7 +16,10 @@ class ReplyGenerator:
 
     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=300, request_type="reply_generation"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=300,
+            request_type="reply_generation",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
diff --git a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
index 197ad9375..46602b5d7 100644
--- a/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
+++ b/src/plugins/chat_module/reasoning_chat/reasoning_generator.py
@@ -29,7 +29,10 @@ class ResponseGenerator:
             request_type="response_reasoning",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_reasoning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_reasoning",
         )
 
         self.model_sum = LLM_request(
diff --git a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
index 98303f975..325ecd5c6 100644
--- a/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
+++ b/src/plugins/chat_module/think_flow_chat/think_flow_generator.py
@@ -26,7 +26,10 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_heartflow",
         )
 
         self.model_sum = LLM_request(
@@ -49,7 +52,9 @@ class ResponseGenerator:
         if random.random() > 0:
             checked = False
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # 激活度越高,温度越高
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # 激活度越高,温度越高
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="normal"
             )
@@ -58,7 +63,9 @@ class ResponseGenerator:
         else:
             checked = True
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # 激活度越高,温度越高
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # 激活度越高,温度越高
             print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="simple"
diff --git a/src/plugins/config/config.py b/src/plugins/config/config.py
index bf4bfccda..d0a209d35 100644
--- a/src/plugins/config/config.py
+++ b/src/plugins/config/config.py
@@ -198,7 +198,7 @@ class BotConfig:
     heart_flow_update_interval: int = 300  # 心流更新频率,间隔 单位秒
     observation_context_size: int = 20  # 心流观察到的最长上下文大小,超过这个值的上下文会被压缩
     compressed_length: int = 5  # 不能大于observation_context_size,心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5
-    compress_length_limit: int = 5 #最多压缩份数,超过该数值的压缩上下文会被删除
+    compress_length_limit: int = 5  # 最多压缩份数,超过该数值的压缩上下文会被删除
 
     # willing
     willing_mode: str = "classical"  # 意愿模式
@@ -447,9 +447,7 @@ class BotConfig:
             config.observation_context_size = heartflow_config.get(
                 "observation_context_size", config.observation_context_size
             )
-            config.compressed_length = heartflow_config.get(
-                "compressed_length", config.compressed_length
-            )
+            config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
             config.compress_length_limit = heartflow_config.get(
                 "compress_length_limit", config.compress_length_limit
             )
@@ -502,7 +500,15 @@ class BotConfig:
 
             # base_url 的例子: SILICONFLOW_BASE_URL
             # key 的例子: SILICONFLOW_KEY
-            cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0, "temp": 0.7}
+            cfg_target = {
+                "name": "",
+                "base_url": "",
+                "key": "",
+                "stream": False,
+                "pri_in": 0,
+                "pri_out": 0,
+                "temp": 0.7,
+            }
 
             if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
                 cfg_target = cfg_item
@@ -515,7 +521,7 @@ class BotConfig:
                 stable_item.append("stream")
 
             pricing_item = ["pri_in", "pri_out"]
-            
+
             # 从配置中原始拷贝稳定字段
             for i in stable_item:
                 # 如果 字段 属于计费项 且获取不到,那默认值是 0