🤖 Auto-format code [skip ci]

github-actions[bot]
2025-04-14 14:28:14 +00:00
parent 200755dc8c
commit cad5011c24
7 changed files with 42 additions and 14 deletions

View File

@@ -79,7 +79,10 @@ class SubHeartflow:
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
-            model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"], max_tokens=600, request_type="sub_heart_flow"
+            model=global_config.llm_sub_heartflow,
+            temperature=global_config.llm_sub_heartflow["temp"],
+            max_tokens=600,
+            request_type="sub_heart_flow",
         )
         self.main_heartflow_info = ""

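Every hunk below follows the same mechanical reflow: a single over-long `LLM_request(...)` call is split to one keyword argument per line, with a trailing comma added. That is the shape `black` produces for a call exceeding the configured line length, which fits the CI bot authoring the commit. A minimal sketch of reproducing the reflow with `black`'s Python API; the 120-character line length is an assumption about the project's configuration, not something this commit confirms:

```python
import black

# The over-long call from the hunk above, embedded as source text.
SRC = (
    "self.llm_model = LLM_request(\n"
    '    model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"],'
    ' max_tokens=600, request_type="sub_heart_flow"\n'
    ")\n"
)

# black.format_str parses the snippet and reflows it; since the call does not
# fit on one 120-character line, each keyword argument moves to its own line
# and a trailing comma is added -- the same shape as the diffs in this commit.
print(black.format_str(SRC, mode=black.Mode(line_length=120)))
```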
View File

@@ -24,7 +24,10 @@ class ActionPlanner:
     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="action_planning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="action_planning",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME

View File

@@ -13,7 +13,10 @@ class KnowledgeFetcher:
     def __init__(self):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="knowledge_fetch"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="knowledge_fetch",
         )
 
     async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:

View File

@@ -16,7 +16,10 @@ class ReplyGenerator:
     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=300, request_type="reply_generation"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=300,
+            request_type="reply_generation",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME

View File

@@ -29,7 +29,10 @@ class ResponseGenerator:
             request_type="response_reasoning",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_reasoning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_reasoning",
         )
         self.model_sum = LLM_request(

View File

@@ -26,7 +26,10 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_heartflow",
         )
         self.model_sum = LLM_request(
@@ -49,7 +52,9 @@ class ResponseGenerator:
         if random.random() > 0:
             checked = False
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # the higher the arousal, the higher the temperature
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="normal"
             )
@@ -58,7 +63,9 @@ class ResponseGenerator:
         else:
             checked = True
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # the higher the arousal, the higher the temperature
             print(f"Temperature for reply to {message.processed_plain_text}: {current_model.temperature}")
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="simple"

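Both branches of this hunk do the same thing: scale the sampling temperature by an arousal multiplier before generating, so a more aroused bot samples more freely. (As an aside, `random.random()` returns a value in [0.0, 1.0), so `random.random() > 0` is effectively always true and the first branch is taken almost surely.) A standalone sketch of the scaling; `base_temp` and `arousal_multiplier` are stand-ins for `global_config.llm_normal["temp"]` and the multiplier computed elsewhere in `ResponseGenerator`, with illustrative values only:

```python
# Stand-in values: the real ones come from global_config.llm_normal["temp"]
# and the arousal state tracked by the bot.
base_temp = 0.7
arousal_multiplier = 1.3

# The higher the arousal, the higher the temperature, so replies become
# more varied as the bot gets more "excited".
temperature = base_temp * arousal_multiplier
print(f"effective temperature: {temperature:.2f}")  # 0.91
```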
View File

@@ -447,9 +447,7 @@ class BotConfig:
         config.observation_context_size = heartflow_config.get(
             "observation_context_size", config.observation_context_size
         )
-        config.compressed_length = heartflow_config.get(
-            "compressed_length", config.compressed_length
-        )
+        config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
         config.compress_length_limit = heartflow_config.get(
             "compress_length_limit", config.compress_length_limit
         )
@@ -502,7 +500,15 @@ class BotConfig:
                 # example base_url: SILICONFLOW_BASE_URL
                 # example key: SILICONFLOW_KEY
-                cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0, "temp": 0.7}
+                cfg_target = {
+                    "name": "",
+                    "base_url": "",
+                    "key": "",
+                    "stream": False,
+                    "pri_in": 0,
+                    "pri_out": 0,
+                    "temp": 0.7,
+                }
                 if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
                     cfg_target = cfg_item
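
The comments in this hunk record the environment-variable naming convention for providers (`SILICONFLOW_BASE_URL`, `SILICONFLOW_KEY`). A hypothetical sketch of how `cfg_target` could be filled from such variables; the `provider` prefix and the `os.environ` lookups are illustrative assumptions, not code from this commit:

```python
import os

# Assumed convention from the comments above: <PROVIDER>_BASE_URL holds the
# API endpoint and <PROVIDER>_KEY the credential.
provider = "SILICONFLOW"  # hypothetical prefix; the real one would come from config

cfg_target = {
    "name": "",
    "base_url": os.environ.get(f"{provider}_BASE_URL", ""),
    "key": os.environ.get(f"{provider}_KEY", ""),
    "stream": False,
    "pri_in": 0,
    "pri_out": 0,
    "temp": 0.7,
}

# Falls back to empty strings when the variables are unset.
print(cfg_target["base_url"] or "<SILICONFLOW_BASE_URL not set>")
```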