fix: fix a few minor issues

SengokuCola
2025-05-12 21:25:44 +08:00
parent 900ce21175
commit 65fab13f4f
5 changed files with 24 additions and 19 deletions

View File

@@ -79,12 +79,13 @@ class DefaultExpressor:
         action_data: Dict[str, Any],
         reasoning: str,
         anchor_message: MessageRecv,
-    ) -> tuple[bool, str]:
+    ) -> tuple[bool, Optional[List[str]]]:
         # 创建思考消息
         thinking_id = await self._create_thinking_message(anchor_message)
         if not thinking_id:
             raise Exception("无法创建思考消息")
+        reply = None  # 初始化 reply防止未定义
         try:
             has_sent_something = False
@@ -124,7 +125,7 @@ class DefaultExpressor:
         except Exception as e:
             logger.error(f"回复失败: {e}")
-            return False, thinking_id
+            return False, None
 
     # --- 回复器 (Replier) 的定义 --- #
@@ -142,8 +143,8 @@ class DefaultExpressor:
         try:
             # 1. 获取情绪影响因子并调整模型温度
             arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
-            current_temp = global_config.llm_normal["temp"] * arousal_multiplier
-            self.express_model.temperature = current_temp  # 动态调整温度
+            current_temp = float(global_config.llm_normal["temp"]) * arousal_multiplier
+            self.express_model.params["temperature"] = current_temp  # 动态调整温度
 
             # 2. 获取信息捕捉器
             info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
@@ -217,11 +218,14 @@ class DefaultExpressor:
         self, anchor_message: Optional[MessageRecv], response_set: List[str], thinking_id: str
     ) -> Optional[MessageSending]:
         """发送回复消息 (尝试锚定到 anchor_message),使用 HeartFCSender"""
-        if not anchor_message or not anchor_message.chat_stream:
-            logger.error(f"{self.log_prefix} 无法发送回复,缺少有效的锚点消息或聊天流。")
+        chat = self.chat_stream
+        if chat is None:
+            logger.error(f"{self.log_prefix} 无法发送回复chat_stream 为空。")
+            return None
+        if not anchor_message:
+            logger.error(f"{self.log_prefix} 无法发送回复anchor_message 为空。")
             return None
-        chat = self.chat_stream
         chat_id = self.chat_id
         stream_name = chat_manager.get_stream_name(chat_id) or chat_id  # 获取流名称用于日志
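
Aside (illustrative only, not part of the commit): a minimal self-contained sketch of the pattern the temperature hunk above switches to, coercing the configured value to float before scaling and writing into a params dict. The config shape, class name, and values here are assumptions made for the example.

from dataclasses import dataclass, field
from typing import Any, Dict


@dataclass
class ExpressModel:
    # stand-in for the real model object; only the params dict matters here
    params: Dict[str, Any] = field(default_factory=dict)


llm_normal = {"temp": "0.7"}  # config values may arrive as strings
arousal_multiplier = 1.2

model = ExpressModel()
current_temp = float(llm_normal["temp"]) * arousal_multiplier  # coerce before scaling
model.params["temperature"] = current_temp  # write into params, as in the hunk
print(model.params)  # {'temperature': 0.84} (approximately)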

View File

@@ -25,8 +25,9 @@ def init_prompt() -> None:
 例如:
 "表示十分惊叹"时,使用"我嘞个xxxx"
 "表示讽刺的赞同,不想讲道理"时,使用"对对对"
-"想表达某个观点,但不想明说",使用"反讽"
+"想表达某个观点,但不想明说",使用"反讽的句式"
 "想说明某个观点,但懒得明说",使用"懂的都懂"
+"想搞笑的表现高深的感觉",使用"文言文句式"
 现在请你概括
 """

View File

@@ -442,7 +442,7 @@ class HeartFChatting:
         pending = set(tasks.values())
 
         # 等待所有任务完成,同时追踪每个任务的完成情况
-        results = {}
+        results: dict[str, list[InfoBase]] = {}
         while pending:
             # 等待任务完成
             done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0)
@@ -467,12 +467,9 @@ class HeartFChatting:
             )
 
         # 所有任务完成,从结果中提取数据
-        mind_processed_infos = []
-        tool_processed_infos = []
-        chatting_info_processed_infos = []
-        mind_processed_infos = results.get("思考任务")
-        tool_processed_infos = results.get("工具任务")
-        chatting_info_processed_infos = results.get("聊天信息处理任务")
+        mind_processed_infos = results.get("思考任务", [])
+        tool_processed_infos = results.get("工具任务", [])
+        chatting_info_processed_infos = results.get("聊天信息处理任务", [])
 
         # 记录总耗时
         parallel_end_time = time.time()
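
Aside (illustrative only, not the repository's code): a minimal sketch of the named-task gathering pattern the two HeartFChatting hunks above converge on, showing why results.get(name, []) with a list default keeps downstream variables iterable when a task never reports a result. The worker and task names are placeholders.

import asyncio
from typing import Dict, List, Set


async def worker(items: List[str]) -> List[str]:
    await asyncio.sleep(0.01)  # stand-in for real processing
    return items


async def main() -> None:
    tasks = {
        "思考任务": asyncio.create_task(worker(["某个想法"])),
        "工具任务": asyncio.create_task(worker([])),
        # "聊天信息处理任务" is deliberately absent to exercise the fallback below
    }
    results: Dict[str, List[str]] = {}
    pending: Set[asyncio.Task] = set(tasks.values())
    while pending:
        done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED, timeout=1.0)
        for task in done:
            name = next(n for n, t in tasks.items() if t is task)  # map task back to its name
            results[name] = task.result()

    # .get(name, []) falls back to an empty list, so callers can iterate safely
    mind_infos = results.get("思考任务", [])
    chatting_infos = results.get("聊天信息处理任务", [])
    print(mind_infos, chatting_infos)  # ['某个想法'] []


asyncio.run(main())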

View File

@@ -50,7 +50,7 @@ def init_prompt():
 {chat_info}
-你需要学习聊天内容中其他人的回复风格,并结合到你的回复中
+需要了解聊天记录中的内容就好
 {chat_target}
 你的名字是{bot_name}{prompt_personality},你想表达:{in_mind_reply},原因是:{reason}
@@ -373,7 +373,7 @@ class PromptBuilder:
         in_mind_reply=None,
     ) -> Optional[str]:
         if build_mode == "normal":
-            return await self._build_prompt_normal(chat_stream, message_txt, sender_name)
+            return await self._build_prompt_normal(chat_stream, message_txt or "", sender_name)
         elif build_mode == "focus":
             return await _build_prompt_focus(

View File

@@ -22,6 +22,7 @@ from src.plugins.heartFC_chat.info_processors.processor_utils import (
     get_spark,
 )
 from typing import Dict
+from src.heart_flow.info.info_base import InfoBase
 
 logger = get_logger("sub_heartflow")
@@ -123,14 +124,14 @@ class MindProcessor(BaseProcessor):
     async def process_info(
         self, observations: Optional[List[Observation]] = None, running_memorys: Optional[List[Dict]] = None, *infos
-    ) -> List[dict]:
+    ) -> List[InfoBase]:
         """处理信息对象
 
         Args:
             *infos: 可变数量的InfoBase类型的信息对象
 
         Returns:
-            List[dict]: 处理后的结构化信息列表
+            List[InfoBase]: 处理后的结构化信息列表
         """
 
         current_mind = await self.do_thinking_before_reply(observations, running_memorys)
@@ -180,6 +181,8 @@ class MindProcessor(BaseProcessor):
         # 获取现有想法和情绪状态
         previous_mind = self.current_mind if self.current_mind else ""
 
+        if observations is None:
+            observations = []
         for observation in observations:
             if isinstance(observation, ChattingObservation):
                 # 获取聊天元信息
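
Aside (illustrative only): the final hunk's None check is the usual idiom for optional list parameters; a minimal sketch with placeholder names:

from typing import List, Optional


def process(observations: Optional[List[str]] = None) -> List[str]:
    if observations is None:
        observations = []  # normalize before iterating, as the added lines do
    processed = []
    for obs in observations:
        processed.append(obs.upper())
    return processed


print(process())            # [] instead of a TypeError on None
print(process(["a", "b"]))  # ['A', 'B']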