🤖 Auto-format code [skip ci]
@@ -79,7 +79,10 @@ class SubHeartflow:
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
-            model=global_config.llm_sub_heartflow, temperature=global_config.llm_sub_heartflow["temp"], max_tokens=600, request_type="sub_heart_flow"
+            model=global_config.llm_sub_heartflow,
+            temperature=global_config.llm_sub_heartflow["temp"],
+            max_tokens=600,
+            request_type="sub_heart_flow",
         )

         self.main_heartflow_info = ""
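The constructor hunks below all apply the same transformation: one long `LLM_request(...)` call is expanded to one keyword argument per line, with a trailing comma after the last argument. For reference, a minimal sketch of a constructor that would accept these keywords, assuming `LLM_request` takes exactly the arguments visible in this diff (the real class lives elsewhere in the repo and may differ):

    # Hypothetical stand-in for the LLM_request class used above. The model
    # argument is treated as a dict, since the call sites index it with ["temp"].
    class LLM_request:
        def __init__(self, model: dict, temperature: float = 0.7,
                     max_tokens: int = 256, request_type: str = ""):
            self.model = model
            self.temperature = temperature
            self.max_tokens = max_tokens
            self.request_type = request_type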
@@ -24,7 +24,10 @@ class ActionPlanner:

     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="action_planning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="action_planning",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
@@ -13,7 +13,10 @@ class KnowledgeFetcher:

     def __init__(self):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=1000, request_type="knowledge_fetch"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=1000,
+            request_type="knowledge_fetch",
         )

     async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
@@ -16,7 +16,10 @@ class ReplyGenerator:

     def __init__(self, stream_id: str):
         self.llm = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=300, request_type="reply_generation"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=300,
+            request_type="reply_generation",
         )
         self.personality_info = Individuality.get_instance().get_prompt(type="personality", x_person=2, level=2)
         self.name = global_config.BOT_NICKNAME
@@ -29,7 +29,10 @@ class ResponseGenerator:
             request_type="response_reasoning",
         )
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_reasoning"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_reasoning",
         )

         self.model_sum = LLM_request(
@@ -26,7 +26,10 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=global_config.llm_normal["temp"], max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal,
+            temperature=global_config.llm_normal["temp"],
+            max_tokens=256,
+            request_type="response_heartflow",
         )

         self.model_sum = LLM_request(
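Taken together, the hunks above give each request type its own token budget. Collected from this diff (these are the only values the commit shows; any others are not known from it):

    # Per-request-type max_tokens values, exactly as they appear in this commit.
    MAX_TOKENS = {
        "sub_heart_flow": 600,
        "action_planning": 1000,
        "knowledge_fetch": 1000,
        "reply_generation": 300,
        "response_reasoning": 256,
        "response_heartflow": 256,
    }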
@@ -49,7 +52,9 @@ class ResponseGenerator:
         if random.random() > 0:
             checked = False
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # the higher the arousal, the higher the temperature
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="normal"
             )
@@ -58,7 +63,9 @@ class ResponseGenerator:
         else:
             checked = True
             current_model = self.model_normal
-            current_model.temperature = global_config.llm_normal["temp"] * arousal_multiplier  # the higher the arousal, the higher the temperature
+            current_model.temperature = (
+                global_config.llm_normal["temp"] * arousal_multiplier
+            )  # the higher the arousal, the higher the temperature
             print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
             model_response = await self._generate_response_with_model(
                 message, current_model, thinking_id, mode="simple"
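Both branches above scale the sampling temperature by an arousal multiplier; note that `random.random() > 0` holds for every draw except exactly 0.0, so the `else` branch is effectively dead. A minimal sketch of the scaling, assuming `arousal_multiplier` comes from the bot's mood state (its source is outside this diff):

    base_temp = 0.7           # stands in for global_config.llm_normal["temp"]
    arousal_multiplier = 1.3  # hypothetical value; greater than 1 when arousal is high
    temperature = base_temp * arousal_multiplier  # 0.91: higher arousal, higher temperature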
@@ -198,7 +198,7 @@ class BotConfig:
     heart_flow_update_interval: int = 300  # heart flow update frequency, interval in seconds
     observation_context_size: int = 20  # maximum context size the heart flow observes; context beyond this value gets compressed
     compressed_length: int = 5  # must not exceed observation_context_size; the minimum length heart flow context is compressed to when it exceeds the observed window, no fewer than 5
-    compress_length_limit: int = 5 # maximum number of compressed chunks; compressed context beyond this count is deleted
+    compress_length_limit: int = 5  # maximum number of compressed chunks; compressed context beyond this count is deleted

     # willing
     willing_mode: str = "classical"  # willingness mode
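The comments above encode an invariant: `compressed_length` must not exceed `observation_context_size`. A small sketch of a check that would enforce it, assuming these are plain attributes on `BotConfig` (no such check appears in this diff itself):

    def validate_heartflow(config) -> None:
        # The compressed length cannot exceed the window it is compressing.
        if config.compressed_length > config.observation_context_size:
            raise ValueError("compressed_length must be <= observation_context_size")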
@@ -447,9 +447,7 @@ class BotConfig:
             config.observation_context_size = heartflow_config.get(
                 "observation_context_size", config.observation_context_size
             )
-            config.compressed_length = heartflow_config.get(
-                "compressed_length", config.compressed_length
-            )
+            config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)
             config.compress_length_limit = heartflow_config.get(
                 "compress_length_limit", config.compress_length_limit
             )
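The loader pattern in this hunk is `dict.get(key, current_value)`: a key present in the parsed `heartflow_config` section overrides the field, and a missing key keeps the existing default. For example, with a hypothetical parsed section:

    heartflow_config = {"observation_context_size": 30}  # hypothetical parsed TOML section
    config.observation_context_size = heartflow_config.get(
        "observation_context_size", config.observation_context_size
    )  # -> 30, overridden by the section
    config.compressed_length = heartflow_config.get("compressed_length", config.compressed_length)  # -> keeps the default 5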
@@ -502,7 +500,15 @@ class BotConfig:

             # base_url example: SILICONFLOW_BASE_URL
             # key example: SILICONFLOW_KEY
-            cfg_target = {"name": "", "base_url": "", "key": "", "stream": False, "pri_in": 0, "pri_out": 0, "temp": 0.7}
+            cfg_target = {
+                "name": "",
+                "base_url": "",
+                "key": "",
+                "stream": False,
+                "pri_in": 0,
+                "pri_out": 0,
+                "temp": 0.7,
+            }

             if config.INNER_VERSION in SpecifierSet("<=0.0.0"):
                 cfg_target = cfg_item
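`SpecifierSet` here is the version-matching class from the `packaging` library: `version in SpecifierSet("<=0.0.0")` matches only version 0.0.0, so this branch takes the raw `cfg_item` for legacy unversioned configs. A self-contained sketch of the gate, assuming `INNER_VERSION` is a `packaging` version (how it gets set is outside this diff):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    inner_version = Version("0.0.0")  # hypothetical legacy config version
    if inner_version in SpecifierSet("<=0.0.0"):
        print("legacy path: take cfg_item as-is")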