🤖 自动格式化代码 [skip ci]
@@ -50,7 +50,6 @@ PROCESSOR_CLASSES = {
 logger = get_logger("hfc")  # Logger Name Changed


-
 class HeartFChatting:
     """
     管理一个连续的Focus Chat循环
@@ -100,9 +99,7 @@ class HeartFChatting:
         self._register_default_processors()

         self.action_manager = ActionManager()
-        self.action_planner = ActionPlanner(
-            log_prefix=self.log_prefix, action_manager=self.action_manager
-        )
+        self.action_planner = ActionPlanner(log_prefix=self.log_prefix, action_manager=self.action_manager)
         self.action_modifier = ActionModifier(action_manager=self.action_manager, chat_id=self.stream_id)
         self.action_observation = ActionObservation(observe_id=self.stream_id)
         self.action_observation.set_action_manager(self.action_manager)
@@ -360,7 +357,6 @@ class HeartFChatting:
             formatted_time = f"{elapsed * 1000:.2f}毫秒" if elapsed < 1 else f"{elapsed:.2f}秒"
             timer_strings.append(f"{name}: {formatted_time}")

-
         logger.info(
             f"{self.log_prefix} 第{self._current_cycle_detail.cycle_id}次思考,"
             f"耗时: {self._current_cycle_detail.end_time - self._current_cycle_detail.start_time:.1f}秒, "
@@ -494,7 +490,6 @@ class HeartFChatting:
             )
             traceback.print_exc()

-
         return all_plan_info

     async def _observe_process_plan_action_loop(self, cycle_timers: dict, thinking_id: str) -> dict:
@@ -528,7 +523,6 @@ class HeartFChatting:
             logger.error(f"{self.log_prefix} 动作修改失败: {e}")
             # 继续执行,不中断流程

-
         try:
             all_plan_info = await self._process_processors(self.observations)
         except Exception as e:
@@ -118,7 +118,6 @@ class MemoryActivator:
         # 添加新的关键词到缓存
         self.cached_keywords.update(keywords)

-
         # 调用记忆系统获取相关记忆
         related_memory = await hippocampus_manager.get_memory_from_topic(
             valid_keywords=keywords, max_memory_num=3, max_memory_length=2, max_depth=3
@@ -28,6 +28,7 @@ willing_manager = get_willing_manager()

 logger = get_logger("normal_chat")

+
 class NormalChat:
     """
     普通聊天处理类,负责处理非核心对话的聊天逻辑。
@@ -77,7 +78,6 @@ class NormalChat:
         self.recent_replies = []
         self.max_replies_history = 20  # 最多保存最近20条回复记录

-
         # 添加回调函数,用于在满足条件时通知切换到focus_chat模式
         self.on_switch_to_focus_callback = on_switch_to_focus_callback

@@ -568,9 +568,7 @@ class NormalChat:
         available_actions = None
         if self.enable_planner:
             try:
-                await self.action_modifier.modify_actions(
-                    mode="normal", message_content=message.processed_plain_text
-                )
+                await self.action_modifier.modify_actions(mode="normal", message_content=message.processed_plain_text)
                 available_actions = self.action_manager.get_using_actions_for_mode("normal")
             except Exception as e:
                 logger.warning(f"[{self.stream_name}] 获取available_actions失败: {e}")
@@ -954,6 +952,7 @@ class NormalChat:
         except Exception as e:
             logger.warning(f"[{self.stream_name}] 获取疲劳调整系数时出错: {e}")
             return 1.0  # 出错时返回正常系数
+
     async def _check_should_switch_to_focus(self) -> bool:
         """
         检查是否满足切换到focus模式的条件
@@ -2,7 +2,6 @@ from typing import List, Optional, Any, Dict
 from src.chat.heart_flow.observation.observation import Observation
 from src.common.logger import get_logger
 from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
-from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
 from src.chat.message_receive.chat_stream import get_chat_manager
 from src.config.config import global_config
 from src.llm_models.utils_model import LLMRequest
@@ -104,7 +103,6 @@ class ActionModifier:
             self.action_manager.remove_action_from_using(action_name)
             logger.debug(f"{self.log_prefix}阶段一移除动作: {action_name},原因: {reason}")

-
         # === 第二阶段:激活类型判定 ===
         if chat_content is not None:
             logger.debug(f"{self.log_prefix}开始激活类型判定阶段")
@@ -141,9 +139,7 @@ class ActionModifier:
                 associated_types_str = ", ".join(data["associated_types"])
                 reason = f"适配器不支持(需要: {associated_types_str})"
                 type_mismatched_actions.append((action_name, reason))
-                logger.debug(
-                    f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}"
-                )
+                logger.debug(f"{self.log_prefix}决定移除动作: {action_name},原因: {reason}")
         return type_mismatched_actions

     async def _get_deactivated_actions_by_type(
@@ -517,21 +513,23 @@ class ActionModifier:
             # 如果最近sec_thres_reply_num次都是reply,40%概率移除
             removal_probability = 0.4 / global_config.focus_chat.consecutive_replies
             if random.random() < removal_probability:
-                reason = f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
+                reason = (
+                    f"连续回复较多(最近{sec_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
+                )
                 removals.append(("reply", reason))
         elif len(last_max_reply_num) >= one_thres_reply_num and all(last_max_reply_num[-one_thres_reply_num:]):
             # 如果最近one_thres_reply_num次都是reply,20%概率移除
             removal_probability = 0.2 / global_config.focus_chat.consecutive_replies
             if random.random() < removal_probability:
-                reason = f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
+                reason = (
+                    f"连续回复检测(最近{one_thres_reply_num}次全是reply,{removal_probability:.2f}概率移除,触发移除)"
+                )
                 removals.append(("reply", reason))
         else:
             logger.debug(f"{self.log_prefix}连续回复检测:无需移除reply动作,最近回复模式正常")

         return removals

-
-
     def get_available_actions_count(self) -> int:
         """获取当前可用动作数量(排除默认的no_action)"""
         current_actions = self.action_manager.get_using_actions_for_mode("normal")
@@ -83,9 +83,7 @@ class ActionPlanner:
             request_type="focus.planner",  # 用于动作规划
         )

-    async def plan(
-        self, all_plan_info: List[InfoBase],loop_start_time: float
-    ) -> Dict[str, Any]:
+    async def plan(self, all_plan_info: List[InfoBase], loop_start_time: float) -> Dict[str, Any]:
         """
         规划器 (Planner): 使用LLM根据上下文决定做出什么动作。

@@ -98,7 +98,6 @@ class DefaultReplyer:
         self.log_prefix = "replyer"
         self.request_type = request_type

-
         if model_configs:
             self.express_model_configs = model_configs
         else:
@@ -470,7 +469,13 @@ class DefaultReplyer:
         duration = end_time - start_time
         return name, result, duration

-    async def build_prompt_reply_context(self, reply_data=None, available_actions: List[str] = None, enable_timeout: bool = False, enable_tool: bool = True) -> str:
+    async def build_prompt_reply_context(
+        self,
+        reply_data=None,
+        available_actions: List[str] = None,
+        enable_timeout: bool = False,
+        enable_tool: bool = True,
+    ) -> str:
         """
         构建回复器上下文

@@ -537,10 +542,16 @@ class DefaultReplyer:

         # 并行执行四个构建任务
         task_results = await asyncio.gather(
-            self._time_and_run_task(self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"),
-            self._time_and_run_task(self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"),
+            self._time_and_run_task(
+                self.build_expression_habits(chat_talking_prompt_half, target), "build_expression_habits"
+            ),
+            self._time_and_run_task(
+                self.build_relation_info(reply_data, chat_talking_prompt_half), "build_relation_info"
+            ),
             self._time_and_run_task(self.build_memory_block(chat_talking_prompt_half, target), "build_memory_block"),
-            self._time_and_run_task(self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"),
+            self._time_and_run_task(
+                self.build_tool_info(reply_data, chat_talking_prompt_half, enable_tool=enable_tool), "build_tool_info"
+            ),
         )

         # 处理结果
@@ -273,7 +273,6 @@ class MessageReceiveConfig(ConfigBase):
 class NormalChatConfig(ConfigBase):
     """普通聊天配置类"""

-
     willing_mode: str = "classical"
     """意愿模式"""

@@ -290,7 +289,6 @@ class NormalChatConfig(ConfigBase):
     """是否启用动作规划器"""


-
 @dataclass
 class FocusChatConfig(ConfigBase):
     """专注聊天配置类"""
@@ -92,9 +92,7 @@ async def generate_reply(
     """
     try:
         # 获取回复器
-        replyer = get_replyer(
-            chat_stream, chat_id, model_configs=model_configs, request_type=request_type
-        )
+        replyer = get_replyer(chat_stream, chat_id, model_configs=model_configs, request_type=request_type)
         if not replyer:
             logger.error("[GeneratorAPI] 无法获取回复器")
             return False, []