fix: fix the running memory error
@@ -376,9 +376,7 @@ class HeartFChatting:
         if acquired and self._processing_lock.locked():
             self._processing_lock.release()
 
-    async def _process_processors(
-        self, observations: List[Observation], running_memorys: List[Dict[str, Any]]
-    ) -> tuple[List[InfoBase], Dict[str, float]]:
+    async def _process_processors(self, observations: List[Observation]) -> tuple[List[InfoBase], Dict[str, float]]:
         # 记录并行任务开始时间
         parallel_start_time = time.time()
         logger.debug(f"{self.log_prefix} 开始信息处理器并行任务")
@@ -392,7 +390,7 @@ class HeartFChatting:
 
                 async def run_with_timeout(proc=processor):
                     return await asyncio.wait_for(
-                        proc.process_info(observations=observations, running_memorys=running_memorys),
+                        proc.process_info(observations=observations),
                         timeout=global_config.focus_chat.processor_max_time,
                     )
 
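
For context, here is the per-processor timeout pattern from the hunk above in isolation. This is an illustrative sketch only: the stub processor class, the run_processors helper, and the 0.5-second limit are assumptions standing in for the real processor list and for global_config.focus_chat.processor_max_time; only the asyncio.wait_for guard around process_info(observations=...) mirrors the diff:

    import asyncio
    import time
    from typing import Any, Dict, List, Tuple


    class StubProcessor:
        """Illustrative stand-in for a BaseProcessor subclass."""

        async def process_info(self, observations: List[Any] = None, **kwargs: Any) -> List[dict]:
            await asyncio.sleep(0.1)  # simulate LLM / tool latency
            return [{"type": "stub_info", "observed": len(observations or [])}]


    async def run_processors(
        processors: List[StubProcessor], observations: List[Any], max_time: float = 0.5
    ) -> Tuple[List[dict], Dict[str, float]]:
        """Run every processor in parallel, each guarded by its own timeout."""
        time_costs: Dict[str, float] = {}

        async def run_with_timeout(proc: StubProcessor) -> List[dict]:
            start = time.time()
            try:
                # Same guard the diff keeps: a single slow processor is cut off
                # without blocking the others.
                infos = await asyncio.wait_for(
                    proc.process_info(observations=observations), timeout=max_time
                )
            except asyncio.TimeoutError:
                infos = []  # a timed-out processor simply contributes nothing
            time_costs[type(proc).__name__] = time.time() - start
            return infos

        results: List[dict] = []
        for infos in await asyncio.gather(*(run_with_timeout(p) for p in processors)):
            results.extend(infos)
        return results, time_costs


    if __name__ == "__main__":
        print(asyncio.run(run_processors([StubProcessor()], observations=[])))
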
@@ -479,7 +477,7 @@ class HeartFChatting:
         # 创建三个并行任务
         action_modify_task = asyncio.create_task(modify_actions_task())
         memory_task = asyncio.create_task(self.memory_activator.activate_memory(self.observations))
-        processor_task = asyncio.create_task(self._process_processors(self.observations, []))
+        processor_task = asyncio.create_task(self._process_processors(self.observations))
 
         # 等待三个任务完成
         _, running_memorys, (all_plan_info, processor_time_costs) = await asyncio.gather(
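
The gather above is the core of the fix: running_memorys is now produced solely by memory_task (MemoryActivator.activate_memory) instead of being threaded through _process_processors as an extra argument, which previously received an empty list. A self-contained sketch of the same three-task pattern follows; the coroutine bodies, sleep times, and the sample memory dict are placeholders, while the task layout and the unpacking line mirror the diff:

    import asyncio
    from typing import Any, Dict, List, Tuple


    async def modify_actions_task() -> None:
        await asyncio.sleep(0.05)  # placeholder for action modification


    async def activate_memory(observations: List[Any]) -> List[Dict[str, str]]:
        await asyncio.sleep(0.05)  # placeholder for MemoryActivator.activate_memory
        return [{"topic": "example", "content": "a recalled memory"}]


    async def process_processors(observations: List[Any]) -> Tuple[List[dict], Dict[str, float]]:
        await asyncio.sleep(0.05)  # placeholder for the processor pipeline
        return [{"type": "plan_info"}], {"StubProcessor": 0.05}


    async def main() -> None:
        observations: List[Any] = []

        # Three independent tasks, started together.
        action_modify_task = asyncio.create_task(modify_actions_task())
        memory_task = asyncio.create_task(activate_memory(observations))
        processor_task = asyncio.create_task(process_processors(observations))

        # running_memorys comes from memory_task alone; the processors
        # no longer receive (or need) a running_memorys argument.
        _, running_memorys, (all_plan_info, processor_time_costs) = await asyncio.gather(
            action_modify_task, memory_task, processor_task
        )
        print(running_memorys, all_plan_info, processor_time_costs)


    if __name__ == "__main__":
        asyncio.run(main())
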
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import List, Any, Optional, Dict
+from typing import List, Any
 from src.chat.focus_chat.info.info_base import InfoBase
 from src.chat.heart_flow.observation.observation import Observation
 from src.common.logger import get_logger
@@ -23,8 +23,7 @@ class BaseProcessor(ABC):
     @abstractmethod
     async def process_info(
         self,
-        observations: Optional[List[Observation]] = None,
-        running_memorys: Optional[List[Dict]] = None,
+        observations: List[Observation] = None,
         **kwargs: Any,
     ) -> List[InfoBase]:
         """处理信息对象的抽象方法
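
The abstract signature above now takes only observations plus **kwargs. A minimal sketch of a conforming subclass follows; the placeholder InfoBase/Observation classes and the EchoProcessor are invented for illustration (the real classes live under src.chat.focus_chat and src.chat.heart_flow), and the sketch writes the parameter as Optional[...] for clarity where the diff writes List[Observation] = None:

    from abc import ABC, abstractmethod
    from typing import Any, List, Optional


    class InfoBase:  # placeholder for src.chat.focus_chat.info.info_base.InfoBase
        pass


    class Observation:  # placeholder for src.chat.heart_flow.observation.observation.Observation
        pass


    class BaseProcessor(ABC):
        @abstractmethod
        async def process_info(
            self,
            observations: Optional[List[Observation]] = None,
            **kwargs: Any,
        ) -> List[InfoBase]:
            """Subclasses turn observations into structured InfoBase objects."""
            raise NotImplementedError


    class EchoProcessor(BaseProcessor):
        """Illustrative subclass: wraps each observation in an InfoBase."""

        async def process_info(
            self,
            observations: Optional[List[Observation]] = None,
            **kwargs: Any,
        ) -> List[InfoBase]:
            return [InfoBase() for _ in (observations or [])]
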
@@ -1,4 +1,4 @@
-from typing import List, Optional, Any
+from typing import List, Any
 from src.chat.focus_chat.info.obs_info import ObsInfo
 from src.chat.heart_flow.observation.observation import Observation
 from src.chat.focus_chat.info.info_base import InfoBase
@@ -8,7 +8,6 @@ from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
 from src.chat.focus_chat.info.cycle_info import CycleInfo
 from datetime import datetime
-from typing import Dict
 from src.llm_models.utils_model import LLMRequest
 from src.config.config import global_config
 
@@ -35,8 +34,7 @@ class ChattingInfoProcessor(BaseProcessor):
 
     async def process_info(
         self,
-        observations: Optional[List[Observation]] = None,
-        running_memorys: Optional[List[Dict]] = None,
+        observations: List[Observation] = None,
         **kwargs: Any,
     ) -> List[InfoBase]:
         """处理Observation对象
@@ -110,7 +110,7 @@ class MindProcessor(BaseProcessor):
         logger.debug(f"{self.log_prefix} 更新 structured_info_str: \n{self.structured_info_str}")
 
     async def process_info(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
+        self, observations: List[Observation] = None,
     ) -> List[InfoBase]:
         """处理信息对象
 
@@ -120,7 +120,7 @@ class MindProcessor(BaseProcessor):
         Returns:
             List[InfoBase]: 处理后的结构化信息列表
         """
-        current_mind = await self.do_thinking_before_reply(observations, running_memorys)
+        current_mind = await self.do_thinking_before_reply(observations)
 
         mind_info = MindInfo()
         mind_info.set_current_mind(current_mind)
@@ -128,7 +128,7 @@ class MindProcessor(BaseProcessor):
         return [mind_info]
 
     async def do_thinking_before_reply(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None
+        self, observations: List[Observation] = None
     ):
         """
         在回复前进行思考,生成内心想法并收集工具调用结果
@@ -157,13 +157,6 @@ class MindProcessor(BaseProcessor):
         logger.debug(
             f"{self.log_prefix} 当前完整的 structured_info: {safe_json_dumps(self.structured_info, ensure_ascii=False)}"
         )
-
-        memory_str = ""
-        if running_memorys:
-            memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
-            for running_memory in running_memorys:
-                memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
-
         # ---------- 1. 准备基础数据 ----------
         # 获取现有想法和情绪状态
         previous_mind = self.current_mind if self.current_mind else ""
@@ -203,7 +196,6 @@ class MindProcessor(BaseProcessor):
 
         prompt = (await global_prompt_manager.get_prompt_async(template_name)).format(
             bot_name=get_individuality().name,
-            memory_str=memory_str,
             extra_info=self.structured_info_str,
             relation_prompt=relation_prompt,
             time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
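
The block deleted from do_thinking_before_reply above (and the matching memory_str argument dropped from the prompt format call) is the per-processor formatting of running_memorys into a prompt fragment. If a caller that receives running_memorys from the memory task still needs that text, the same formatting can live in a single helper instead of being repeated in every processor. The sketch below reuses the deleted logic verbatim; the helper name and the example data are assumptions, while the 'topic'/'content' keys come from the deleted code:

    from typing import Dict, List, Optional


    def format_running_memories(running_memorys: Optional[List[Dict[str, str]]]) -> str:
        """Render recalled memories the way the deleted processor code did."""
        if not running_memorys:
            return ""
        memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
        for running_memory in running_memorys:
            memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
        return memory_str


    # Example: output of the memory task, as unpacked from asyncio.gather above.
    print(format_running_memories([{"topic": "天气", "content": "用户提到今天下雨"}]))
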
@@ -115,7 +115,7 @@ class RelationshipProcessor(BaseProcessor):
         self.log_prefix = f"[{name}] "
 
     async def process_info(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
+        self, observations: List[Observation] = None, *infos
     ) -> List[InfoBase]:
         """处理信息对象
 
@@ -138,7 +138,7 @@ class RelationshipProcessor(BaseProcessor):
 
     async def relation_identify(
         self,
-        observations: Optional[List[Observation]] = None,
+        observations: List[Observation] = None,
     ):
         """
         在回复前进行思考,生成内心想法并收集工具调用结果
@@ -63,7 +63,7 @@ class SelfProcessor(BaseProcessor):
         self.log_prefix = f"[{name}] "
 
     async def process_info(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
+        self, observations: List[Observation] = None, *infos
     ) -> List[InfoBase]:
         """处理信息对象
 
@@ -73,7 +73,7 @@ class SelfProcessor(BaseProcessor):
         Returns:
             List[InfoBase]: 处理后的结构化信息列表
         """
-        self_info_str = await self.self_indentify(observations, running_memorys)
+        self_info_str = await self.self_indentify(observations)
 
         if self_info_str:
             self_info = SelfInfo()
@@ -85,7 +85,7 @@ class SelfProcessor(BaseProcessor):
         return [self_info]
 
     async def self_indentify(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None
+        self, observations: List[Observation] = None,
     ):
         """
         在回复前进行思考,生成内心想法并收集工具调用结果
@@ -8,7 +8,7 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
 from src.tools.tool_use import ToolUser
 from src.chat.utils.json_utils import process_llm_tool_calls
 from .base_processor import BaseProcessor
-from typing import List, Optional, Dict
+from typing import List, Optional
 from src.chat.heart_flow.observation.observation import Observation
 from src.chat.focus_chat.info.structured_info import StructuredInfo
 from src.chat.heart_flow.observation.structure_observation import StructureObservation
@@ -48,7 +48,7 @@ class ToolProcessor(BaseProcessor):
         self.structured_info = []
 
     async def process_info(
-        self, observations: Optional[List[Observation]] = None, running_memories: Optional[List[Dict]] = None, *infos
+        self, observations: Optional[List[Observation]] = None
     ) -> List[StructuredInfo]:
         """处理信息对象
 
@@ -67,7 +67,7 @@ class ToolProcessor(BaseProcessor):
         if observations:
             for observation in observations:
                 if isinstance(observation, ChattingObservation):
-                    result, used_tools, prompt = await self.execute_tools(observation, running_memories)
+                    result, used_tools, prompt = await self.execute_tools(observation)
 
                     logger.debug(f"工具调用结果: {result}")
                     # 更新WorkingObservation中的结构化信息
@@ -87,7 +87,7 @@ class ToolProcessor(BaseProcessor):
 
         return [structured_info]
 
-    async def execute_tools(self, observation: ChattingObservation, running_memorys: Optional[List[Dict]] = None):
+    async def execute_tools(self, observation: ChattingObservation):
         """
         并行执行工具,返回结构化信息
 
@@ -117,19 +117,12 @@ class ToolProcessor(BaseProcessor):
         chat_observe_info = observation.get_observe_info()
         # person_list = observation.person_list
 
-        memory_str = ""
-        if running_memorys:
-            memory_str = "以下是当前在聊天中,你回忆起的记忆:\n"
-            for running_memory in running_memorys:
-                memory_str += f"{running_memory['topic']}: {running_memory['content']}\n"
-
         # 获取时间信息
         time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
 
         # 构建专用于工具调用的提示词
         prompt = await global_prompt_manager.format_prompt(
             "tool_executor_prompt",
-            memory_str=memory_str,
             chat_observe_info=chat_observe_info,
             is_group_chat=is_group_chat,
             bot_name=get_individuality().name,
@@ -68,7 +68,7 @@ class WorkingMemoryProcessor(BaseProcessor):
         self.log_prefix = f"[{name}] "
 
     async def process_info(
-        self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
+        self, observations: List[Observation] = None, *infos
     ) -> List[InfoBase]:
         """处理信息对象