diff --git a/src/heart_flow/heartflow.py b/src/heart_flow/heartflow.py
index 5c67fe125..f5b394f2e 100644
--- a/src/heart_flow/heartflow.py
+++ b/src/heart_flow/heartflow.py
@@ -18,7 +18,7 @@ heartflow_config = LogConfig(
 logger = get_module_logger("heartflow", config=heartflow_config)
 
 
-class CuttentState:
+class CurrentState:
     def __init__(self):
         self.willing = 0
         self.current_state_info = ""
@@ -34,7 +34,7 @@ class Heartflow:
     def __init__(self):
         self.current_mind = "你什么也没想"
         self.past_mind = []
-        self.current_state: CuttentState = CuttentState()
+        self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
             model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
         )
@@ -102,7 +102,11 @@ class Heartflow:
         current_thinking_info = self.current_mind
         mood_info = self.current_state.mood
         related_memory_info = "memory"
-        sub_flows_info = await self.get_all_subheartflows_minds()
+        try:
+            sub_flows_info = await self.get_all_subheartflows_minds()
+        except Exception as e:
+            logger.error(f"获取子心流的想法失败: {e}")
+            return
 
         schedule_info = bot_schedule.get_current_num_task(num=4, time_info=True)
 
@@ -111,26 +115,29 @@ class Heartflow:
         prompt += f"{personality_info}\n"
         prompt += f"你想起来{related_memory_info}。"
         prompt += f"刚刚你的主要想法是{current_thinking_info}。"
-        prompt += f"你还有一些小想法,因为你在参加不同的群聊天,是你正在做的事情:{sub_flows_info}\n"
+        prompt += f"你还有一些小想法,因为你在参加不同的群聊天,这是你正在做的事情:{sub_flows_info}\n"
         prompt += f"你现在{mood_info}。"
         prompt += "现在你接下去继续思考,产生新的想法,但是要基于原有的主要想法,不要分点输出,"
         prompt += "输出连贯的内心独白,不要太长,但是记得结合上述的消息,关注新内容:"
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"内心独白获取失败: {e}")
+            return
+        self.update_current_mind(response)
 
-        self.update_current_mind(reponse)
-
-        self.current_mind = reponse
+        self.current_mind = response
 
         logger.info(f"麦麦的总体脑内状态:{self.current_mind}")
         # logger.info("麦麦想了想,当前活动:")
         # await bot_schedule.move_doing(self.current_mind)
 
         for _, subheartflow in self._subheartflows.items():
-            subheartflow.main_heartflow_info = reponse
+            subheartflow.main_heartflow_info = response
 
-    def update_current_mind(self, reponse):
+    def update_current_mind(self, response):
         self.past_mind.append(self.current_mind)
-        self.current_mind = reponse
+        self.current_mind = response
 
     async def get_all_subheartflows_minds(self):
         sub_minds = ""
@@ -167,9 +174,9 @@ class Heartflow:
         prompt += """现在请你总结这些聊天内容,注意关注聊天内容对原有的想法的影响,输出连贯的内心独白
 不要太长,但是记得结合上述的消息,要记得你的人设,关注新内容:"""
 
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        response, reasoning_content = await self.llm_model.generate_response_async(prompt)
 
-        return reponse
+        return response
 
     def create_subheartflow(self, subheartflow_id):
         """
diff --git a/src/heart_flow/observation.py b/src/heart_flow/observation.py
index 5befd7322..78cb9ef67 100644
--- a/src/heart_flow/observation.py
+++ b/src/heart_flow/observation.py
@@ -142,7 +142,11 @@ class ChattingObservation(Observation):
         prompt += """以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,
 以及聊天中的一些重要信息,注意识别你自己的发言,记得不要分点,不要太长,精简的概括成一段文本\n"""
         prompt += "总结概括:"
-        self.observe_info, reasoning_content = await self.llm_summary.generate_response_async(prompt)
+        try:
+            self.observe_info, reasoning_content = await self.llm_summary.generate_response_async(prompt)
+        except Exception as e:
+            print(f"获取总结失败: {e}")
+            self.observe_info = ""
         print(f"prompt:{prompt}")
         print(f"self.observe_info:{self.observe_info}")
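Reviewer note (not part of the patch): the heartflow.py and observation.py hunks above all apply the same recovery pattern — await the LLM call inside try/except, log the failure, and either return early or fall back to an empty string. A minimal sketch of how that pattern could be centralized, assuming only the `generate_response_async(prompt) -> (text, reasoning)` shape already used in the diff; the helper name `safe_generate` and its `fallback` parameter are hypothetical:

```python
import logging

logger = logging.getLogger(__name__)


async def safe_generate(model, prompt: str, fallback: str = "") -> str:
    """Await model.generate_response_async and degrade to `fallback` on error.

    Hypothetical helper; mirrors the try/except blocks added in this patch.
    """
    try:
        response, _reasoning = await model.generate_response_async(prompt)
        return response
    except Exception as e:
        logger.error(f"LLM 请求失败: {e}")
        return fallback
```

Call sites that currently `return` early on failure (such as `do_a_thinking`) would still need their own exit check, e.g. `if not response: return`.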
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index 83f505cf8..a2a4c0bbf 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -22,7 +22,7 @@ subheartflow_config = LogConfig(
 logger = get_module_logger("subheartflow", config=subheartflow_config)
 
 
-class CuttentState:
+class CurrentState:
     def __init__(self):
         self.willing = 0
         self.current_state_info = ""
@@ -40,7 +40,7 @@ class SubHeartflow:
         self.current_mind = ""
         self.past_mind = []
-        self.current_state: CuttentState = CuttentState()
+        self.current_state: CurrentState = CurrentState()
         self.llm_model = LLM_request(
             model=global_config.llm_sub_heartflow, temperature=0.5, max_tokens=600, request_type="sub_heart_flow"
         )
@@ -143,11 +143,11 @@ class SubHeartflow:
         # prompt += f"你现在{mood_info}\n"
         # prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
         # prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:"
-        # reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        # response, reasoning_content = await self.llm_model.generate_response_async(prompt)
 
-        # self.update_current_mind(reponse)
+        # self.update_current_mind(response)
 
-        # self.current_mind = reponse
+        # self.current_mind = response
 
         # logger.debug(f"prompt:\n{prompt}\n")
         # logger.info(f"麦麦的脑内状态:{self.current_mind}")
@@ -217,9 +217,15 @@ class SubHeartflow:
         prompt += f"你注意到有人刚刚说:{message_txt}\n"
         prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
         prompt += "记得结合上述的消息,要记得维持住你的人设,注意自己的名字,关注有人刚刚说的内容,不要思考太多:"
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
-        self.update_current_mind(reponse)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"回复前内心独白获取失败: {e}")
+            response = ""
+        self.update_current_mind(response)
+
+        self.current_mind = response
 
         logger.debug(f"prompt:\n{prompt}\n")
         logger.info(f"麦麦的思考前脑内状态:{self.current_mind}")
@@ -264,12 +270,14 @@ class SubHeartflow:
         prompt += f"你现在{mood_info}"
         prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白"
         prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,关注你回复的内容,不要思考太多:"
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+        except Exception as e:
+            logger.error(f"回复后内心独白获取失败: {e}")
+            response = ""
+        self.update_current_mind(response)
 
-        reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)
-
-        self.update_current_mind(reponse)
-
-        self.current_mind = reponse
+        self.current_mind = response
 
         logger.info(f"麦麦回复后的脑内状态:{self.current_mind}")
         self.last_reply_time = time.time()
@@ -302,10 +310,13 @@ class SubHeartflow:
         prompt += f"你现在{mood_info}。"
         prompt += "现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
        prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
-
-        response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-        # 解析willing值
-        willing_match = re.search(r"<(\d+)>", response)
+        try:
+            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
+            # 解析willing值
+            willing_match = re.search(r"<(\d+)>", response)
+        except Exception as e:
+            logger.error(f"意愿判断获取失败: {e}")
+            willing_match = None
         if willing_match:
             self.current_state.willing = int(willing_match.group(1))
         else:
@@ -313,9 +324,9 @@ class SubHeartflow:
 
         return self.current_state.willing
 
-    def update_current_mind(self, reponse):
+    def update_current_mind(self, response):
         self.past_mind.append(self.current_mind)
-        self.current_mind = reponse
+        self.current_mind = response
 
     async def get_prompt_info(self, message: str, threshold: float):
         start_time = time.time()
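Reviewer note (not part of the patch): in the willingness hunk above, the regex parse sits inside the try block, so a failed request and an unparseable reply both reach the `willing_match is None` branch. A standalone sketch of that parsing contract follows; the `default` value and the clamp to the 1-10 scale the prompt asks for are illustrative assumptions, not something the patch does:

```python
import re
from typing import Optional


def parse_willing(response: Optional[str], default: int = 5) -> int:
    """Extract the <N> willingness marker the prompt asks the model to emit."""
    if not response:  # request failed or returned nothing
        return default
    match = re.search(r"<(\d+)>", response)  # same pattern as the patch
    if not match:  # model ignored the <N> format
        return default
    return max(1, min(10, int(match.group(1))))  # clamp is an assumption


assert parse_willing("嗯,我想回一句 <7>") == 7
assert parse_willing(None) == 5
```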
diff --git a/src/plugins/PFC/pfc.py b/src/plugins/PFC/pfc.py
index 3436dce8f..c88ed47d5 100644
--- a/src/plugins/PFC/pfc.py
+++ b/src/plugins/PFC/pfc.py
@@ -114,9 +114,12 @@ class GoalAnalyzer:
         }}"""
 
         logger.debug(f"发送到LLM的提示词: {prompt}")
-        content, _ = await self.llm.generate_response_async(prompt)
-        logger.debug(f"LLM原始返回内容: {content}")
-
+        try:
+            content, _ = await self.llm.generate_response_async(prompt)
+            logger.debug(f"LLM原始返回内容: {content}")
+        except Exception as e:
+            logger.error(f"分析对话目标时出错: {str(e)}")
+            content = ""
         # 使用简化函数提取JSON内容
         success, result = get_items_from_json(
             content, "goal", "reasoning", required_types={"goal": str, "reasoning": str}
diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py
index 6d070c83f..de3a5a54d 100644
--- a/src/plugins/chat/emoji_manager.py
+++ b/src/plugins/chat/emoji_manager.py
@@ -340,6 +340,9 @@ class EmojiManager:
 
             if description is not None:
                 embedding = await get_embedding(description, request_type="emoji")
+                if not embedding:
+                    logger.error("获取消息嵌入向量失败")
+                    raise ValueError("获取消息嵌入向量失败")
                 # 准备数据库记录
                 emoji_record = {
                     "filename": filename,
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index b7cc32e2f..b7986ae3e 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -79,7 +79,13 @@ async def get_embedding(text, request_type="embedding"):
     """获取文本的embedding向量"""
     llm = LLM_request(model=global_config.embedding, request_type=request_type)
     # return llm.get_embedding_sync(text)
-    return await llm.get_embedding(text)
+    try:
+        embedding = await llm.get_embedding(text)
+    except Exception as e:
+        logger.error(f"获取embedding失败: {str(e)}")
+        embedding = None
+    return embedding
+
 
 async def get_recent_group_messages(chat_id: str, limit: int = 12) -> list:
diff --git a/src/plugins/memory_system/Hippocampus.py b/src/plugins/memory_system/Hippocampus.py
index 717cebe17..8e2cd21e7 100644
--- a/src/plugins/memory_system/Hippocampus.py
+++ b/src/plugins/memory_system/Hippocampus.py
@@ -1316,15 +1316,24 @@ class HippocampusManager:
         """从文本中获取相关记忆的公共接口"""
         if not self._initialized:
             raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
-        return await self._hippocampus.get_memory_from_text(
-            text, max_memory_num, max_memory_length, max_depth, fast_retrieval
-        )
+        try:
+            response = await self._hippocampus.get_memory_from_text(text, max_memory_num, max_memory_length, max_depth, fast_retrieval)
+        except Exception as e:
+            logger.error(f"文本激活记忆失败: {e}")
+            response = []
+        return response
+
 
     async def get_activate_from_text(self, text: str, max_depth: int = 3, fast_retrieval: bool = False) -> float:
         """从文本中获取激活值的公共接口"""
         if not self._initialized:
             raise RuntimeError("HippocampusManager 尚未初始化,请先调用 initialize 方法")
-        return await self._hippocampus.get_activate_from_text(text, max_depth, fast_retrieval)
+        try:
+            response = await self._hippocampus.get_activate_from_text(text, max_depth, fast_retrieval)
+        except Exception as e:
+            logger.error(f"文本产生激活值失败: {e}")
+            response = 0.0
+        return response
 
     def get_memory_from_keyword(self, keyword: str, max_depth: int = 2) -> list:
         """从关键词获取相关记忆的公共接口"""
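Reviewer note (not part of the patch): after the utils.py change, `get_embedding` returns `None` on failure instead of raising, so every caller inherits a None-check obligation; emoji_manager.py above is the one call site this patch hardens. Note also that the Hippocampus.py fallbacks match the annotated return types (`[]` for the memory list, `0.0` for the `-> float` activation). A sketch of the caller-side contract, with `embed_or_raise` as a hypothetical name:

```python
from typing import Awaitable, Callable, List, Optional


async def embed_or_raise(
    get_embedding: Callable[[str], Awaitable[Optional[List[float]]]],
    text: str,
) -> List[float]:
    """Return a usable embedding or raise, mirroring the emoji_manager check."""
    embedding = await get_embedding(text)
    if not embedding:  # None on request failure, or an empty vector
        raise ValueError("获取消息嵌入向量失败")
    return embedding
```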
logger.info(f"{today.strftime('%Y-%m-%d')}的日程不存在,准备生成新的日程") - self.today_schedule_text = await self.generate_daily_schedule(target_date=today) + try: + self.today_schedule_text = await self.generate_daily_schedule(target_date=today) + except Exception as e: + logger.error(f"生成日程时发生错误: {str(e)}") + self.today_schedule_text = "" self.save_today_schedule_to_db() diff --git a/src/plugins/topic_identify/topic_identifier.py b/src/plugins/topic_identify/topic_identifier.py index 39b985d7c..743e45870 100644 --- a/src/plugins/topic_identify/topic_identifier.py +++ b/src/plugins/topic_identify/topic_identifier.py @@ -29,10 +29,13 @@ class TopicIdentifier: 消息内容:{text}""" # 使用 LLM_request 类进行请求 - topic, _, _ = await self.llm_topic_judge.generate_response(prompt) - + try: + topic, _, _ = await self.llm_topic_judge.generate_response(prompt) + except Exception as e: + logger.error(f"LLM 请求topic失败: {e}") + return None if not topic: - logger.error("LLM API 返回为空") + logger.error("LLM 得到的topic为空") return None # 直接在这里处理主题解析