Overhaul: fix the issue where _execute_request crashes the program

UnCLAS-Prommer
2025-04-09 13:52:12 +08:00
parent a42d1b3664
commit e35c2bb9b4
9 changed files with 93 additions and 46 deletions
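
Every change below applies the same fix: each awaited call to generate_response_async (presumably the path through _execute_request named in the title) is wrapped in try/except, so a failed LLM request is logged and handled instead of crashing the program. A minimal sketch of the pattern, assuming a hypothetical llm_model object exposing the same generate_response_async(prompt) -> (response, reasoning_content) coroutine:

import logging

logger = logging.getLogger("heartflow")

async def safe_generate(llm_model, prompt: str, fallback: str = "") -> str:
    # Illustrative helper only (not part of this commit): swallow the request
    # error, log it, and return a fallback value so the caller keeps running.
    try:
        response, _reasoning_content = await llm_model.generate_response_async(prompt)
        return response
    except Exception as e:
        logger.error(f"LLM request failed: {e}")
        return fallback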

View File

@@ -18,7 +18,7 @@ heartflow_config = LogConfig(
 logger = get_module_logger("heartflow", config=heartflow_config)
-class CuttentState:
+class CurrentState:
     def __init__(self):
         self.willing = 0
         self.current_state_info = ""
@@ -34,7 +34,7 @@ class Heartflow:
     def __init__(self):
         self.current_mind = "你什么也没想"
         self.past_mind = []
-        self.current_state: CuttentState = CuttentState()
+        self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
             model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
         )
@@ -102,7 +102,11 @@ class Heartflow:
         current_thinking_info = self.current_mind
         mood_info = self.current_state.mood
         related_memory_info = "memory"
-        sub_flows_info = await self.get_all_subheartflows_minds()
+        try:
+            sub_flows_info = await self.get_all_subheartflows_minds()
+        except Exception as e:
+            logger.error(f"获取子心流的想法失败: {e}")
+            return
         schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True)
@@ -111,26 +115,29 @@ class Heartflow:
         prompt += f"{personality_info}\n"
         prompt += f"你想起来{related_memory_info}"
         prompt += f"刚刚你的主要想法是{current_thinking_info}"
-        prompt += f"你还有一些小想法,因为你在参加不同的群聊天,是你正在做的事情:{sub_flows_info}\n"
+        prompt += f"你还有一些小想法,因为你在参加不同的群聊天,是你正在做的事情:{sub_flows_info}\n"
         prompt += f"你现在{mood_info}"
         prompt += "现在你接下去继续思考,产生新的想法,但是要基于原有的主要想法,不要分点输出,"
         prompt += "输出连贯的内心独白,不要太长,但是记得结合上述的消息,关注新内容:"
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"内心独白获取失败: {e}")
+            return
-        self.update_current_mind(reponse)
+        self.update_current_mind(response)
-        self.current_mind = reponse
+        self.current_mind = response
         logger.info(f"麦麦的总体脑内状态:{self.current_mind}")
         # logger.info("麦麦想了想,当前活动:")
         # await bot_schedule.move_doing(self.current_mind)
         for _, subheartflow in self._subheartflows.items():
-            subheartflow.main_heartflow_info = reponse
+            subheartflow.main_heartflow_info = response
-    def update_current_mind(self, reponse):
+    def update_current_mind(self, response):
         self.past_mind.append(self.current_mind)
-        self.current_mind = reponse
+        self.current_mind = response
     async def get_all_subheartflows_minds(self):
         sub_minds = ""
@@ -167,9 +174,9 @@ class Heartflow:
         prompt += """现在请你总结这些聊天内容,注意关注聊天内容对原有的想法的影响,输出连贯的内心独白
 不要太长,但是记得结合上述的消息,要记得你的人设,关注新内容:"""
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-        return reponse
+        return response
     def create_subheartflow(self, subheartflow_id):
         """

View File

@@ -142,7 +142,11 @@ class ChattingObservation(Observation):
         prompt += """以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,
 以及聊天中的一些重要信息,注意识别你自己的发言,记得不要分点,不要太长,精简的概括成一段文本\n"""
         prompt += "总结概括:"
-        self.observe_info, reasoning_content = await self.llm_summary.generate_response_async(prompt)
+        try:
+            self.observe_info, reasoning_content = await self.llm_summary.generate_response_async(prompt)
+        except Exception as e:
+            print(f"获取总结失败: {e}")
+            self.observe_info = ""
         print(f"prompt{prompt}")
         print(f"self.observe_info{self.observe_info}")

View File

@@ -22,7 +22,7 @@ subheartflow_config = LogConfig(
 logger = get_module_logger("subheartflow", config=subheartflow_config)
-class CuttentState:
+class CurrentState:
     def __init__(self):
         self.willing = 0
         self.current_state_info = ""
@@ -40,7 +40,7 @@ class SubHeartflow:
         self.current_mind = ""
         self.past_mind = []
-        self.current_state: CuttentState = CuttentState()
+        self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
             model=global_config.llm_sub_heartflow, temperature=0.7, max_tokens=600, request_type="sub_heart_flow"
         )
@@ -143,11 +143,11 @@ class SubHeartflow:
         # prompt += f"你现在{mood_info}\n"
         # prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
         # prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:"
-        # reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        # response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-        # self.update_current_mind(reponse)
+        # self.update_current_mind(response)
-        # self.current_mind = reponse
+        # self.current_mind = response
         # logger.debug(f"prompt:\n{prompt}\n")
         # logger.info(f"麦麦的脑内状态:{self.current_mind}")
@@ -217,11 +217,14 @@ class SubHeartflow:
         prompt += f"你注意到有人刚刚说:{message_txt}\n"
         prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
         prompt += "记得结合上述的消息,要记得维持住你的人设,注意自己的名字,关注有人刚刚说的内容,不要思考太多:"
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"回复前内心独白获取失败: {e}")
+            response = ""
-        self.update_current_mind(reponse)
+        self.update_current_mind(response)
-        self.current_mind = reponse
+        self.current_mind = response
         logger.debug(f"prompt:\n{prompt}\n")
         logger.info(f"麦麦的思考前脑内状态:{self.current_mind}")
@@ -264,12 +267,14 @@ class SubHeartflow:
         prompt += f"你现在{mood_info}"
         prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白"
         prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,关注你回复的内容,不要思考太多:"
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"回复后内心独白获取失败: {e}")
+            response = ""
-        self.update_current_mind(reponse)
+        self.update_current_mind(response)
-        self.current_mind = reponse
+        self.current_mind = response
         logger.info(f"麦麦回复后的脑内状态:{self.current_mind}")
         self.last_reply_time = time.time()
@@ -302,10 +307,13 @@ class SubHeartflow:
         prompt += f"你现在{mood_info}"
         prompt += "现在请你思考你想不想发言或者回复请你输出一个数字1-101表示非常不想10表示非常想。"
         prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
-        response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-        # 解析willing值
-        willing_match = re.search(r"<(\d+)>", response)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+            # 解析willing值
+            willing_match = re.search(r"<(\d+)>", response)
+        except Exception as e:
+            logger.error(f"意愿判断获取失败: {e}")
+            willing_match = None
         if willing_match:
             self.current_state.willing = int(willing_match.group(1))
         else:
@@ -313,9 +321,9 @@
         return self.current_state.willing
-    def update_current_mind(self, reponse):
+    def update_current_mind(self, response):
         self.past_mind.append(self.current_mind)
-        self.current_mind = reponse
+        self.current_mind = response
     async def get_prompt_info(self, message: str, threshold: float):
         start_time = time.time()
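
The willingness hunk above asks the model for a 1-10 score wrapped in <>, parses it with re.search(r"<(\d+)>", response), and on a failed request sets willing_match to None so the else branch falls back to a default. A standalone sketch of that parsing, with a hypothetical default of 0 (the real else-branch value lies outside the hunk shown):

import re

def parse_willing(reply: str, default: int = 0) -> int:
    # Pull the <N> willingness marker out of the model reply;
    # fall back to `default` when the reply is empty or has no marker.
    match = re.search(r"<(\d+)>", reply or "")
    return int(match.group(1)) if match else default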