better: improve the V3-based heartflow output; tune temperatures and prompts
@@ -13,6 +13,9 @@ from src.common.database import db
 from typing import Union
 from src.individuality.individuality import Individuality
 import random
+from src.plugins.chat.chat_stream import ChatStream
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.chat.utils import get_recent_group_speaker
 
 subheartflow_config = LogConfig(
     # 使用海马体专用样式
@@ -42,7 +45,7 @@ class SubHeartflow:
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
-            model=global_config.llm_sub_heartflow, temperature=0.5, max_tokens=600, request_type="sub_heart_flow"
+            model=global_config.llm_sub_heartflow, temperature=0.3, max_tokens=600, request_type="sub_heart_flow"
         )
 
         self.main_heartflow_info = ""
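This hunk lowers the sub-heartflow model's sampling temperature from 0.5 to 0.3 (a later hunk in this diff similarly drops the reply model from 0.8 to 0.6). Temperature divides the logits before softmax, so lower values concentrate probability on the highest-scoring tokens and make the inner monologue more deterministic. A minimal sketch of the mechanism, independent of the project's LLM_request wrapper and using toy logits:

import math

def softmax_with_temperature(logits, temperature):
    """Divide logits by the temperature, then normalize; lower T sharpens the distribution."""
    scaled = [x / temperature for x in logits]
    m = max(scaled)  # subtract the max for numerical stability
    exps = [math.exp(x - m) for x in scaled]
    total = sum(exps)
    return [e / total for e in exps]

logits = [2.0, 1.0, 0.5]  # toy next-token scores
print(softmax_with_temperature(logits, 1.0))  # top token p ≈ 0.63
print(softmax_with_temperature(logits, 0.5))  # top token p ≈ 0.84
print(softmax_with_temperature(logits, 0.3))  # top token p ≈ 0.96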
@@ -59,6 +62,8 @@ class SubHeartflow:
 
         self.running_knowledges = []
 
+        self.bot_name = global_config.BOT_NICKNAME
+
     def add_observation(self, observation: Observation):
         """添加一个新的observation对象到列表中,如果已存在相同id的observation则不添加"""
         # 查找是否存在相同id的observation
@@ -106,56 +111,11 @@ class SubHeartflow:
             ): # 5分钟无回复/不在场,销毁
                 logger.info(f"子心流 {self.subheartflow_id} 已经5分钟没有激活,正在销毁...")
                 break # 退出循环,销毁自己
-
-    # async def do_a_thinking(self):
-    #     current_thinking_info = self.current_mind
-    #     mood_info = self.current_state.mood
-
-    #     observation = self.observations[0]
-    #     chat_observe_info = observation.observe_info
-    #     # print(f"chat_observe_info:{chat_observe_info}")
-
-    #     # 调取记忆
-    #     related_memory = await HippocampusManager.get_instance().get_memory_from_text(
-    #         text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
-    #     )
-
-    #     if related_memory:
-    #         related_memory_info = ""
-    #         for memory in related_memory:
-    #             related_memory_info += memory[1]
-    #     else:
-    #         related_memory_info = ""
-
-    #     # print(f"相关记忆:{related_memory_info}")
-
-    #     schedule_info = bot_schedule.get_current_num_task(num=1, time_info=False)
-
-    #     prompt = ""
-    #     prompt += f"你刚刚在做的事情是:{schedule_info}\n"
-    #     # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
-    #     prompt += f"你{self.personality_info}\n"
-    #     if related_memory_info:
-    #         prompt += f"你想起来你之前见过的回忆:{related_memory_info}。\n以上是你的回忆,不一定是目前聊天里的人说的,也不一定是现在发生的事情,请记住。\n"
-    #     prompt += f"刚刚你的想法是{current_thinking_info}。\n"
-    #     prompt += "-----------------------------------\n"
-    #     prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
-    #     prompt += f"你现在{mood_info}\n"
-    #     prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
-    #     prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:"
-    #     response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-
-    #     self.update_current_mind(response)
-
-    #     self.current_mind = response
-    #     logger.debug(f"prompt:\n{prompt}\n")
-    #     logger.info(f"麦麦的脑内状态:{self.current_mind}")
-
     async def do_observe(self):
         observation = self.observations[0]
         await observation.observe()
 
-    async def do_thinking_before_reply(self, message_txt):
+    async def do_thinking_before_reply(self, message_txt:str, sender_name:str, chat_stream:ChatStream):
         current_thinking_info = self.current_mind
         mood_info = self.current_state.mood
         # mood_info = "你很生气,很愤怒"
@@ -164,7 +124,7 @@ class SubHeartflow:
         # print(f"chat_observe_info:{chat_observe_info}")
 
         # 开始构建prompt
-        prompt_personality = "你"
+        prompt_personality = f"你的名字是{self.bot_name},你"
         # person
         individuality = Individuality.get_instance()
 
@@ -179,6 +139,25 @@ class SubHeartflow:
             random.shuffle(identity_detail)
             prompt_personality += f",{identity_detail[0]}"
 
+        # 关系
+        who_chat_in_group = [
+            (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
+        ]
+        who_chat_in_group += get_recent_group_speaker(
+            chat_stream.stream_id,
+            (chat_stream.user_info.platform, chat_stream.user_info.user_id),
+            limit=global_config.MAX_CONTEXT_SIZE,
+        )
+
+        relation_prompt = ""
+        for person in who_chat_in_group:
+            relation_prompt += await relationship_manager.build_relationship_info(person)
+
+        relation_prompt_all = (
+            f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
+            f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
+        )
+
         # 调取记忆
         related_memory = await HippocampusManager.get_instance().get_memory_from_text(
             text=chat_observe_info, max_memory_num=2, max_memory_length=2, max_depth=3, fast_retrieval=False
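The new relationship block gathers (platform, user_id, nickname) tuples for the sender plus the recent group speakers, then concatenates one relationship line per person into the prompt. A self-contained sketch of that flow; the two helpers below are hypothetical stand-ins for get_recent_group_speaker and relationship_manager.build_relationship_info, and the IDs and nicknames are made up:

import asyncio

def get_recent_group_speaker(stream_id, exclude, limit=10):
    # Stand-in: the real helper reads recent speakers from the chat stream's history.
    speakers = [("qq", "10002", "小明"), ("qq", "10003", "小红")]
    return [s for s in speakers if (s[0], s[1]) != exclude][:limit]

async def build_relationship_info(person):
    # Stand-in: the real helper looks up the stored relationship level for this person.
    platform, user_id, nickname = person
    return f"你和{nickname}的关系等级是3;"

async def build_relation_prompt(sender, stream_id):
    who_chat_in_group = [sender]
    who_chat_in_group += get_recent_group_speaker(stream_id, (sender[0], sender[1]), limit=10)
    relation_prompt = ""
    for person in who_chat_in_group:
        relation_prompt += await build_relationship_info(person)
    return relation_prompt

print(asyncio.run(build_relation_prompt(("qq", "10001", "群友A"), "stream-1")))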
@@ -204,6 +183,7 @@ class SubHeartflow:
 
         prompt = ""
         # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
+        prompt += f"{relation_prompt_all}\n"
         prompt += f"{prompt_personality}\n"
         prompt += f"你刚刚在做的事情是:{schedule_info}\n"
         if related_memory_info:
@@ -214,9 +194,10 @@ class SubHeartflow:
         prompt += "-----------------------------------\n"
         prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
         prompt += f"你现在{mood_info}\n"
-        prompt += f"你注意到有人刚刚说:{message_txt}\n"
-        prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
-        prompt += "记得结合上述的消息,要记得维持住你的人设,注意自己的名字,关注有人刚刚说的内容,不要思考太多:"
+        prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
+        prompt += "现在你接下去继续浅浅思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
+        prompt += "思考时可以想想如何对群聊内容进行回复。请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),"
+        prompt += f"记得结合上述的消息,要记得维持住你的人设,注意你就是{self.bot_name},{self.bot_name}指的就是你。"
 
         try:
             response, reasoning_content = await self.llm_model.generate_response_async(prompt)
@@ -235,7 +216,7 @@ class SubHeartflow:
         # print("麦麦回复之后脑袋转起来了")
 
         # 开始构建prompt
-        prompt_personality = "你"
+        prompt_personality = f"你的名字是{self.bot_name},你"
         # person
         individuality = Individuality.get_instance()
 
@@ -298,7 +298,9 @@ class ThinkFlowChat:
         try:
             timer1 = time.time()
             current_mind,past_mind = await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(
-                message.processed_plain_text
+                message_txt = message.processed_plain_text,
+                sender_name = message.message_info.user_info.user_nickname,
+                chat_stream = chat
             )
             timer2 = time.time()
             timing_results["思考前脑内状态"] = timer2 - timer1
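The call site now passes every argument by keyword, which stays readable as the signature grows, and brackets the awaited call with time.time() to record how long the pre-reply thinking took. A stripped-down, runnable version of the same pattern, where the coroutine body just simulates the LLM round-trip:

import asyncio
import time

async def do_thinking_before_reply(message_txt: str, sender_name: str, chat_stream) -> str:
    await asyncio.sleep(0.05)  # stands in for the model call
    return f"thinking about what {sender_name} said: {message_txt}"

async def main():
    timing_results = {}
    timer1 = time.time()
    current_mind = await do_thinking_before_reply(
        message_txt="hello",
        sender_name="小明",
        chat_stream=None,  # the real call passes the ChatStream object
    )
    timer2 = time.time()
    timing_results["思考前脑内状态"] = timer2 - timer1
    print(current_mind, timing_results)

asyncio.run(main())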
@@ -25,7 +25,7 @@ logger = get_module_logger("llm_generator", config=llm_config)
 class ResponseGenerator:
     def __init__(self):
         self.model_normal = LLM_request(
-            model=global_config.llm_normal, temperature=0.8, max_tokens=256, request_type="response_heartflow"
+            model=global_config.llm_normal, temperature=0.6, max_tokens=256, request_type="response_heartflow"
         )
 
         self.model_sum = LLM_request(
@@ -26,30 +26,7 @@ class PromptBuilder:
         individuality = Individuality.get_instance()
         prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
         prompt_identity = individuality.get_prompt(type="identity", x_person=2, level=1)
-        # 关系
-        who_chat_in_group = [
-            (chat_stream.user_info.platform, chat_stream.user_info.user_id, chat_stream.user_info.user_nickname)
-        ]
-        who_chat_in_group += get_recent_group_speaker(
-            stream_id,
-            (chat_stream.user_info.platform, chat_stream.user_info.user_id),
-            limit=global_config.MAX_CONTEXT_SIZE,
-        )
-
-        relation_prompt = ""
-        for person in who_chat_in_group:
-            relation_prompt += await relationship_manager.build_relationship_info(person)
-
-        relation_prompt_all = (
-            f"{relation_prompt}关系等级越大,关系越好,请分析聊天记录,"
-            f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
-        )
 
-        # 心情
-        mood_manager = MoodManager.get_instance()
-        mood_prompt = mood_manager.get_prompt()
-
-        logger.info(f"心情prompt: {mood_prompt}")
 
         # 日程构建
         # schedule_prompt = f'''你现在正在做的事情是:{bot_schedule.get_current_num_task(num = 1,time_info = False)}'''
@@ -101,18 +78,16 @@ class PromptBuilder:
         logger.info("开始构建prompt")
 
         prompt = f"""
-{relation_prompt_all}\n
 {chat_target}
 {chat_talking_prompt}
-你刚刚脑子里在想:
-{current_mind_info}
 现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
 你的网名叫{global_config.BOT_NICKNAME},有人也叫你{"/".join(global_config.BOT_ALIAS_NAMES)},{prompt_personality} {prompt_identity}。
 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
-尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
-请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话
-请注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
-{moderation_prompt}不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。"""
+你刚刚脑子里在想:
+{current_mind_info}
+回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。{prompt_ger}
+请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 ,注意只输出回复内容。
+{moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。"""
 
         return prompt
 
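Two things change in this builder: the relationship text and the standalone mood block are removed (relationship handling moved into SubHeartflow earlier in this diff), and the "你刚刚脑子里在想" / current_mind_info block moves from before the triggering message to after the persona line, just ahead of the style constraints. A skeleton of the new ordering; every value below is a hypothetical placeholder for the real template variables:

# Hypothetical placeholder values for the real template variables.
chat_target = "你正在浏览一个QQ群"
chat_talking_prompt = "[最近的聊天记录]"
sender_name = "小明"
message_txt = "麦麦在吗?"
current_mind_info = "我刚才在想今天的日程"

prompt = (
    f"{chat_target}\n"
    f"{chat_talking_prompt}\n"
    f'现在"{sender_name}"说的:{message_txt}。引起了你的注意\n'
    "(网名与人设介绍)\n"
    f"你刚刚脑子里在想:\n{current_mind_info}\n"
    "(简短、口语化、只输出回复内容等风格约束)"
)
print(prompt)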
@@ -32,7 +32,7 @@ class ScheduleGenerator:
         # 使用离线LLM模型
         self.llm_scheduler_all = LLM_request(
             model=global_config.llm_reasoning,
-            temperature=global_config.SCHEDULE_TEMPERATURE,
+            temperature=global_config.SCHEDULE_TEMPERATURE+0.3,
             max_tokens=7000,
             request_type="schedule",
         )
@@ -60,7 +60,7 @@ appearance = "用几句话描述外貌特征" # 外貌特征
 enable_schedule_gen = true # 是否启用日程表(尚未完成)
 prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"
 schedule_doing_update_interval = 900 # 日程表更新间隔 单位秒
-schedule_temperature = 0.3 # 日程表温度,建议0.3-0.6
+schedule_temperature = 0.2 # 日程表温度,建议0.2-0.5
 time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运行电脑时区和国内时区不同的情况,或者模拟国外留学生日程
 
 [platforms] # 必填项目,填写每个平台适配器提供的链接
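Note how this default interacts with the generator change above: the scheduler now adds 0.3 to the configured value while the suggested default drops from 0.3 to 0.2, so the effective sampling temperature for schedule generation actually rises:

# Old: temperature = SCHEDULE_TEMPERATURE        (default 0.3) -> 0.3
# New: temperature = SCHEDULE_TEMPERATURE + 0.3  (default 0.2) -> 0.5
old_effective = 0.3
new_effective = 0.2 + 0.3
print(old_effective, new_effective)  # 0.3 0.5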
@@ -239,12 +239,11 @@ provider = "SILICONFLOW"
 pri_in = 0
 pri_out = 0
 
-[model.llm_sub_heartflow] #心流:建议使用qwen2.5 7b
-# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
-name = "Qwen/Qwen2.5-32B-Instruct"
+[model.llm_sub_heartflow] #子心流:建议使用V3级别
+name = "Pro/deepseek-ai/DeepSeek-V3"
 provider = "SILICONFLOW"
-pri_in = 1.26
-pri_out = 1.26
+pri_in = 2
+pri_out = 8
 
 [model.llm_heartflow] #心流:建议使用qwen2.5 32b
 # name = "Pro/Qwen/Qwen2.5-7B-Instruct"
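The sub-heartflow model switches from Qwen/Qwen2.5-32B-Instruct (1.26 in/out) to Pro/deepseek-ai/DeepSeek-V3 (2 in, 8 out). Assuming pri_in/pri_out are prices per million tokens, a rough per-call comparison at 1,500 prompt tokens and the 600-token max_tokens cap set earlier in this diff (the token counts are illustrative):

def call_cost(pri_in, pri_out, prompt_tokens, completion_tokens):
    # Price per call, in the same currency unit as pri_in/pri_out (assumed to be per 1M tokens).
    return (prompt_tokens * pri_in + completion_tokens * pri_out) / 1_000_000

print(call_cost(1.26, 1.26, 1500, 600))  # Qwen2.5-32B: ≈ 0.0026
print(call_cost(2, 8, 1500, 600))        # DeepSeek-V3: ≈ 0.0078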