From 6c894238335512c251ae15deb386d80f0c3d56ab Mon Sep 17 00:00:00 2001 From: UnCLAS-Prommer Date: Wed, 16 Jul 2025 10:36:52 +0800 Subject: [PATCH 1/3] typing --- src/chat/focus_chat/heartFC_chat.py | 9 ++------- src/chat/planner_actions/planner.py | 24 +++++++++++------------- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 2ccbe82bd..49395a5d6 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -310,14 +310,9 @@ class HeartFChatting: return True else: - - if message_data: - action_message = message_data - else: - action_message = target_message + action_message: Dict[str, Any] = message_data or target_message # type: ignore + # 动作执行计时 - - with Timer("动作执行", cycle_timers): success, reply_text, command = await self._handle_action( action_type, reasoning, action_data, cycle_timers, thinking_id, action_message diff --git a/src/chat/planner_actions/planner.py b/src/chat/planner_actions/planner.py index 36798de2b..61fc2f4d6 100644 --- a/src/chat/planner_actions/planner.py +++ b/src/chat/planner_actions/planner.py @@ -1,7 +1,7 @@ import json import time import traceback -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, Tuple from rich.traceback import install from datetime import datetime from json_repair import repair_json @@ -81,13 +81,14 @@ class ActionPlanner: self.last_obs_time_mark = 0.0 def find_message_by_id(self, message_id: str, message_id_list: list) -> Optional[Dict[str, Any]]: + # sourcery skip: use-next """ 根据message_id从message_id_list中查找对应的原始消息 - + Args: message_id: 要查找的消息ID message_id_list: 消息ID列表,格式为[{'id': str, 'message': dict}, ...] 
- + Returns: 找到的原始消息字典,如果未找到则返回None """ @@ -98,7 +99,7 @@ class ActionPlanner: async def plan( self, mode: ChatMode = ChatMode.FOCUS - ) -> Dict[str, Dict[str, Any] | str]: # sourcery skip: dict-comprehension + ) -> Tuple[Dict[str, Dict[str, Any] | str], Optional[Dict[str, Any]]]: # sourcery skip: dict-comprehension """ 规划器 (Planner): 使用LLM根据上下文决定做出什么动作。 """ @@ -107,7 +108,8 @@ class ActionPlanner: reasoning = "规划器初始化默认" action_data = {} current_available_actions: Dict[str, ActionInfo] = {} - target_message = None # 初始化target_message变量 + target_message: Optional[Dict[str, Any]] = None # 初始化target_message变量 + prompt: str = "" try: is_group_chat = True @@ -128,10 +130,7 @@ class ActionPlanner: # 如果没有可用动作或只有no_reply动作,直接返回no_reply if not current_available_actions: - if mode == ChatMode.FOCUS: - action = "no_reply" - else: - action = "no_action" + action = "no_reply" if mode == ChatMode.FOCUS else "no_action" reasoning = "没有可用的动作" logger.info(f"{self.log_prefix}{reasoning}") return { @@ -140,7 +139,7 @@ class ActionPlanner: "action_data": action_data, "reasoning": reasoning, }, - } + }, None # --- 构建提示词 (调用修改后的 PromptBuilder 方法) --- prompt, message_id_list = await self.build_planner_prompt( @@ -196,8 +195,7 @@ class ActionPlanner: # 在FOCUS模式下,非no_reply动作需要target_message_id if mode == ChatMode.FOCUS and action != "no_reply": - target_message_id = parsed_json.get("target_message_id") - if target_message_id: + if target_message_id := parsed_json.get("target_message_id"): # 根据target_message_id查找原始消息 target_message = self.find_message_by_id(target_message_id, message_id_list) else: @@ -278,7 +276,7 @@ class ActionPlanner: if mode == ChatMode.FOCUS: by_what = "聊天内容" - target_prompt = "\n \"target_message_id\":\"触发action的消息id\"" + target_prompt = '\n "target_message_id":"触发action的消息id"' no_action_block = """重要说明1: - 'no_reply' 表示只进行不进行回复,等待合适的回复时机 - 当你刚刚发送了消息,没有人回复时,选择no_reply From a8cbb2978b8a88f3b4a66273aab3fddd09157e8d Mon Sep 17 00:00:00 2001 From: UnCLAS-Prommer Date: 
Wed, 16 Jul 2025 10:48:39 +0800 Subject: [PATCH 2/3] =?UTF-8?q?=E4=B8=B4=E6=97=B6=E7=9A=84energy=E6=96=B9?= =?UTF-8?q?=E5=BC=8F,=20fix=20ActivationType?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/focus_chat/heartFC_chat.py | 13 +++++++++---- src/plugins/built_in/core_actions/no_reply.py | 4 ++-- src/plugins/built_in/core_actions/plugin.py | 4 ++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/src/chat/focus_chat/heartFC_chat.py b/src/chat/focus_chat/heartFC_chat.py index 49395a5d6..4c3b97bd8 100644 --- a/src/chat/focus_chat/heartFC_chat.py +++ b/src/chat/focus_chat/heartFC_chat.py @@ -212,10 +212,14 @@ class HeartFChatting: return True if new_messages_data: + self.energy_value += 2 * len(new_messages_data) / global_config.chat.focus_value earliest_messages_data = new_messages_data[0] self.last_read_time = earliest_messages_data.get("time") - await self.normal_response(earliest_messages_data) + if await self.normal_response(earliest_messages_data): + self.energy_value += 8 / global_config.chat.focus_value + if self.energy_value >= 100: + self.loop_mode = ChatMode.FOCUS return True await asyncio.sleep(1) @@ -531,12 +535,12 @@ class HeartFChatting: f"意愿放大器更新为: {self.willing_amplifier:.2f}" ) - async def normal_response(self, message_data: dict) -> None: + async def normal_response(self, message_data: dict) -> bool: """ 处理接收到的消息。 在"兴趣"模式下,判断是否回复并生成内容。 """ - + responded = False is_mentioned = message_data.get("is_mentioned", False) interested_rate = message_data.get("interest_rate", 0.0) * self.willing_amplifier @@ -573,10 +577,11 @@ class HeartFChatting: if random.random() < reply_probability: await self.willing_manager.before_generate_reply_handle(message_data.get("message_id", "")) - await self._observe(message_data=message_data) + responded = await self._observe(message_data=message_data) # 意愿管理器:注销当前message信息 (无论是否回复,只要处理过就删除) self.willing_manager.delete(message_data.get("message_id", 
"")) + return responded async def _generate_response( self, message_data: dict, available_actions: Optional[Dict[str, ActionInfo]], reply_to: str diff --git a/src/plugins/built_in/core_actions/no_reply.py b/src/plugins/built_in/core_actions/no_reply.py index 2880d1ec2..246c4abf3 100644 --- a/src/plugins/built_in/core_actions/no_reply.py +++ b/src/plugins/built_in/core_actions/no_reply.py @@ -24,8 +24,8 @@ class NoReplyAction(BaseAction): 2. 累计新消息数量达到随机阈值 (默认5-10条) 则结束等待 """ - focus_activation_type = ActionActivationType.NEVER - normal_activation_type = ActionActivationType.NEVER + focus_activation_type = ActionActivationType.ALWAYS + normal_activation_type = ActionActivationType.ALWAYS mode_enable = ChatMode.FOCUS parallel_action = False diff --git a/src/plugins/built_in/core_actions/plugin.py b/src/plugins/built_in/core_actions/plugin.py index 7f635436a..11c2812d6 100644 --- a/src/plugins/built_in/core_actions/plugin.py +++ b/src/plugins/built_in/core_actions/plugin.py @@ -36,8 +36,8 @@ class ReplyAction(BaseAction): """回复动作 - 参与聊天回复""" # 激活设置 - focus_activation_type = ActionActivationType.NEVER - normal_activation_type = ActionActivationType.NEVER + focus_activation_type = ActionActivationType.ALWAYS + normal_activation_type = ActionActivationType.ALWAYS mode_enable = ChatMode.FOCUS parallel_action = False From c71f2b21c064564631b960ecbbca6f25cfcae08d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=A2=A8=E6=A2=93=E6=9F=92?= <1787882683@qq.com> Date: Wed, 16 Jul 2025 11:00:16 +0800 Subject: [PATCH 3/3] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0=E5=90=8C?= =?UTF-8?q?=E6=AD=A5=E8=8E=B7=E5=8F=96embedding=E5=90=91=E9=87=8F=E5=92=8C?= =?UTF-8?q?=E7=94=9F=E6=88=90=E5=93=8D=E5=BA=94=E7=9A=84=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/chat/knowledge/embedding_store.py | 4 ++-- src/chat/knowledge/ie_process.py | 4 ++-- src/chat/knowledge/qa_manager.py | 4 ++-- src/chat/utils/utils.py | 12 ++++++++++++ 
src/llm_models/utils_model.py | 23 +++++++++++++++++++++++ 5 files changed, 41 insertions(+), 6 deletions(-) diff --git a/src/chat/knowledge/embedding_store.py b/src/chat/knowledge/embedding_store.py index 3eb466d21..2cb9fbdfb 100644 --- a/src/chat/knowledge/embedding_store.py +++ b/src/chat/knowledge/embedding_store.py @@ -26,7 +26,7 @@ from rich.progress import ( TextColumn, ) from src.manager.local_store_manager import local_storage -from src.chat.utils.utils import get_embedding +from src.chat.utils.utils import get_embedding_sync from src.config.config import global_config @@ -99,7 +99,7 @@ class EmbeddingStore: self.idx2hash = None def _get_embedding(self, s: str) -> List[float]: - return get_embedding(s) + return get_embedding_sync(s) def get_test_file_path(self): return EMBEDDING_TEST_FILE diff --git a/src/chat/knowledge/ie_process.py b/src/chat/knowledge/ie_process.py index bd0e17684..a6f72eb52 100644 --- a/src/chat/knowledge/ie_process.py +++ b/src/chat/knowledge/ie_process.py @@ -28,7 +28,7 @@ def _extract_json_from_text(text: str) -> dict: def _entity_extract(llm_req: LLMRequest, paragraph: str) -> List[str]: """对段落进行实体提取,返回提取出的实体列表(JSON格式)""" entity_extract_context = prompt_template.build_entity_extract_context(paragraph) - response, (reasoning_content, model_name) = llm_req.generate_response_async(entity_extract_context) + response, (reasoning_content, model_name) = llm_req.generate_response_sync(entity_extract_context) entity_extract_result = _extract_json_from_text(response) # 尝试load JSON数据 @@ -50,7 +50,7 @@ def _rdf_triple_extract(llm_req: LLMRequest, paragraph: str, entities: list) -> rdf_extract_context = prompt_template.build_rdf_triple_extract_context( paragraph, entities=json.dumps(entities, ensure_ascii=False) ) - response, (reasoning_content, model_name) = llm_req.generate_response_async(rdf_extract_context) + response, (reasoning_content, model_name) = llm_req.generate_response_sync(rdf_extract_context) entity_extract_result = 
_extract_json_from_text(response) # 尝试load JSON数据 diff --git a/src/chat/knowledge/qa_manager.py b/src/chat/knowledge/qa_manager.py index c83683b79..b4a0dc1fc 100644 --- a/src/chat/knowledge/qa_manager.py +++ b/src/chat/knowledge/qa_manager.py @@ -10,7 +10,7 @@ from .kg_manager import KGManager # from .lpmmconfig import global_config from .utils.dyn_topk import dyn_select_top_k from src.llm_models.utils_model import LLMRequest -from src.chat.utils.utils import get_embedding +from src.chat.utils.utils import get_embedding from src.config.config import global_config MAX_KNOWLEDGE_LENGTH = 10000 # 最大知识长度 @@ -36,7 +36,7 @@ class QAManager: # 生成问题的Embedding part_start_time = time.perf_counter() - question_embedding = await get_embedding(question) + question_embedding = await get_embedding(question) if question_embedding is None: logger.error("生成问题Embedding失败") return None diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py index a329b3548..045e9e911 100644 --- a/src/chat/utils/utils.py +++ b/src/chat/utils/utils.py @@ -122,6 +122,18 @@ async def get_embedding(text, request_type="embedding"): return embedding +def get_embedding_sync(text, request_type="embedding"): + """获取文本的embedding向量(同步版本)""" + # TODO: API-Adapter修改标记 + llm = LLMRequest(model=global_config.model.embedding, request_type=request_type) + try: + embedding = llm.get_embedding_sync(text) + except Exception as e: + logger.error(f"获取embedding失败: {str(e)}") + embedding = None + return embedding + + def get_recent_group_speaker(chat_stream_id: str, sender, limit: int = 12) -> list: # 获取当前群聊记录内发言的人 filter_query = {"chat_id": chat_stream_id} diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py index 1077cfa09..e2e37fdbd 100644 --- a/src/llm_models/utils_model.py +++ b/src/llm_models/utils_model.py @@ -827,6 +827,29 @@ class LLMRequest: ) return embedding + def get_embedding_sync(self, text: str) -> Union[list, None]: + """同步方法:获取文本的embedding向量 + + Args: + text: 
需要获取embedding的文本 + + Returns: + list: embedding向量,如果失败则返回None + """ + return asyncio.run(self.get_embedding(text)) + + def generate_response_sync(self, prompt: str, **kwargs) -> Union[str, Tuple]: + """同步方式根据输入的提示生成模型的响应 + + Args: + prompt: 输入的提示文本 + **kwargs: 额外的参数 + + Returns: + Union[str, Tuple]: 模型响应内容,如果有工具调用则返回元组 + """ + return asyncio.run(self.generate_response_async(prompt, **kwargs)) + def compress_base64_image_by_scale(base64_data: str, target_size: int = 0.8 * 1024 * 1024) -> str: """压缩base64格式的图片到指定大小