better: improve the V3-based heartflow behavior; tune temperature and prompts

SengokuCola committed 2025-04-09 23:24:09 +08:00
parent f3d6e7cfa5
commit 451d0c9a32
6 changed files with 48 additions and 91 deletions


@@ -298,7 +298,9 @@ class ThinkFlowChat:
         try:
             timer1 = time.time()
             current_mind,past_mind = await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(
-                message.processed_plain_text
+                message_txt = message.processed_plain_text,
+                sender_name = message.message_info.user_info.user_nickname,
+                chat_stream = chat
             )
             timer2 = time.time()
             timing_results["思考前脑内状态"] = timer2 - timer1


@@ -25,7 +25,7 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal, temperature=0.6, max_tokens=256, request_type="response_heartflow"
         )
         self.model_sum = LLM_request(
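Dropping temperature from 0.8 to 0.6 makes the reply model sample more conservatively. As a reminder of what the knob does (a generic sketch, not this project's code): temperature divides the logits before softmax, so lower values sharpen the distribution toward the top token and yield steadier, less rambling replies:

import math

def softmax_with_temperature(logits: list[float], temperature: float) -> list[float]:
    # Lower temperature sharpens the distribution toward the highest logit.
    scaled = [x / temperature for x in logits]
    m = max(scaled)  # subtract the max for numerical stability
    exps = [math.exp(s - m) for s in scaled]
    total = sum(exps)
    return [e / total for e in exps]

# The same logits, sampled flatter at 0.8 and sharper at 0.6:
print(softmax_with_temperature([2.0, 1.0, 0.1], 0.8))
print(softmax_with_temperature([2.0, 1.0, 0.1], 0.6))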


@@ -26,30 +26,7 @@ class PromptBuilder:
         individuality = Individuality.get_instance()
         prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
         prompt_identity = individuality.get_prompt(type="identity", x_person=2, level=1)
-        # 关系
-        who_chat_in_group = [
-            (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
-        ]
-        who_chat_in_group += get_recent_group_speaker(
-            stream_id,
-            (chat_stream.user_info.platform, chat_stream.user_info.user_id),
-            limit=global_config.MAX_CONTEXT_SIZE,
-        )
-        relation_prompt = ""
-        for person in who_chat_in_group:
-            relation_prompt += await relationship_manager.build_relationship_info(person)
-        relation_prompt_all = (
-            f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
-            f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
-        )
-        # 心情
-        mood_manager = MoodManager.get_instance()
-        mood_prompt = mood_manager.get_prompt()
-        logger.info(f"心情prompt: {mood_prompt}")
         # 日程构建
         # schedule_prompt = f'''你现在正在做的事情是:{bot_schedule.get_current_num_task(num = 1,time_info = False)}'''
@@ -101,18 +78,16 @@ class PromptBuilder:
         logger.info("开始构建prompt")
         prompt = f"""
-{relation_prompt_all}\n
 {chat_target}
 {chat_talking_prompt}
-你刚刚脑子里在想:
-{current_mind_info}
 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)}{prompt_personality} {prompt_identity}
 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
-尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
-请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
-请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
-{moderation_prompt}不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。"""
+你刚刚脑子里在想:
+{current_mind_info}
+回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
+请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 ,注意只输出回复内容。
+{moderation_prompt}。注意:不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。"""
         return prompt
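Net effect of the two hunks above: the relationship and mood sections are removed, and the "what you were just thinking" block moves after the identity lines, so the internal monologue sits right next to the reply instructions. For readers who don't read Chinese, a loose gloss of the resulting template follows (a paraphrase with placeholder inputs, not the project's code):

# Loose English gloss of the new prompt template. The structure mirrors the
# hunk above; the inputs are dummies and the wording is a translation, not code
# from the repository.
def render_prompt_gloss(sender_name: str, message_txt: str, current_mind_info: str) -> str:
    return (
        "<chat target and recent chat log go here>\n"
        f'Now "{sender_name}" said: {message_txt}. It caught your attention and you '
        "want to speak up in the group or reply to this message.\n"
        "<bot nickname, aliases, personality and identity lines go here>\n"
        "What was just going through your head:\n"
        f"{current_mind_info}\n"
        "Keep the reply short. Stay on top of the conversation, don't answer too "
        "methodically, some personality is fine.\n"
        "Keep it plain and brief, answer in Chinese, don't play up your academic "
        "background, try not to repeat yourself, and output only the reply text.\n"
        "<moderation line>. Note: no extra output (no prefixes/suffixes, colons, "
        "quotes, brackets, stickers, at or @ mentions, etc.)."
    )

print(render_prompt_gloss("Alice", "lunch?", "I was wondering what to eat today."))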


@@ -32,7 +32,7 @@ class ScheduleGenerator:
         # 使用离线LLM模型
         self.llm_scheduler_all = LLM_request(
             model=global_config.llm_reasoning,
-            temperature=global_config.SCHEDULE_TEMPERATURE,
+            temperature=global_config.SCHEDULE_TEMPERATURE+0.3,
             max_tokens=7000,
             request_type="schedule",
         )
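One caveat with `SCHEDULE_TEMPERATURE+0.3`: if the configured base is already near the provider's ceiling, the sum can leave the valid range. A defensive sketch follows; the clamp is a suggestion, not part of the commit, and the 2.0 ceiling assumes an OpenAI-style API:

# Hypothetical guard, not in the commit: keep the bumped schedule temperature
# within a typical provider range (OpenAI-style APIs accept 0.0 to 2.0).
def bumped_temperature(base: float, bump: float = 0.3, ceiling: float = 2.0) -> float:
    return min(base + bump, ceiling)

assert bumped_temperature(0.7) == 1.0
assert bumped_temperature(1.9) == 2.0  # clamped instead of rejected by the API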