feat: 为Kokoro Flow Chatter实现统一模式,支持模块化提示生成
- 新增统一模式:通过单次LLM调用同时处理推理与响应生成
- 采用模块化提示组件:系统提示与用户提示分离,提升灵活性和可维护性
- 日志更新:更清晰地记录生成响应与执行动作
- 版本更新:在机器人配置模板中递增版本号以反映变更
- 上下文处理优化:改进用户交互与记忆管理
This commit is contained in:
@@ -172,9 +172,9 @@ class ChatterActionManager:
|
||||
# 设置正在处理的状态
|
||||
chat_stream.context.is_replying = True
|
||||
|
||||
# no_action 特殊处理
|
||||
if action_name == "no_action":
|
||||
return {"action_type": "no_action", "success": True, "reply_text": ""}
|
||||
# no_action / do_nothing 特殊处理
|
||||
if action_name in ("no_action", "do_nothing"):
|
||||
return {"action_type": action_name, "success": True, "reply_text": ""}
|
||||
|
||||
# 创建并执行动作
|
||||
action_handler = self.create_action(
|
||||
|
||||
@@ -638,6 +638,12 @@ DEFAULT_MODULE_COLORS = {
|
||||
"context_web": "#5F5F00", # 深黄色
|
||||
"gift_manager": "#D7005F", # 粉红色
|
||||
"prompt": "#875FFF", # 紫色(mais4u的prompt)
|
||||
# Kokoro Flow Chatter (KFC) 系统
|
||||
"kfc_planner": "#b19cd9", # 淡紫色 - KFC 规划器
|
||||
"kfc_replyer": "#b19cd9", # 淡紫色 - KFC 回复器
|
||||
"kfc_chatter": "#b19cd9", # 淡紫色 - KFC 主模块
|
||||
"kfc_unified": "#d7afff", # 柔和紫色 - KFC 统一模式
|
||||
"kfc_proactive_thinker": "#d7afff", # 柔和紫色 - KFC 主动思考器
|
||||
"super_chat_manager": "#AF005F", # 紫红色
|
||||
"watching": "#AF5F5F", # 深橙色
|
||||
"offline_llm": "#303030", # 深灰色
|
||||
@@ -682,6 +688,7 @@ DEFAULT_MODULE_COLORS = {
|
||||
"kfc_session_manager": "#87D787", # 绿色 - 会话管理
|
||||
"kfc_scheduler": "#D787AF", # 柔和粉色 - 调度器
|
||||
"kfc_post_processor": "#5F87FF", # 蓝色 - 后处理
|
||||
"kfc_unified": "#FF5FAF", # 粉色 - 统一模式
|
||||
}
|
||||
|
||||
DEFAULT_MODULE_ALIASES = {
|
||||
@@ -818,6 +825,7 @@ DEFAULT_MODULE_ALIASES = {
|
||||
"kfc_session_manager": "KFC会话",
|
||||
"kfc_scheduler": "KFC调度",
|
||||
"kfc_post_processor": "KFC后处理",
|
||||
"kfc_unified": "KFC统一模式",
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
"""
|
||||
Kokoro Flow Chatter - 私聊特化的心流聊天器
|
||||
Kokoro Flow Chatter (KFC) - 私聊特化的心流聊天器
|
||||
|
||||
重构版本,核心设计理念:
|
||||
重构版本,支持双模式架构:
|
||||
|
||||
工作模式:
|
||||
- unified(统一模式): 单次 LLM 调用完成思考和回复生成(默认)
|
||||
- split(分离模式): Planner + Replyer 两次 LLM 调用,更精细的控制
|
||||
|
||||
核心设计理念:
|
||||
1. Chatter 职责极简化:只负责"收到消息 → 规划执行"
|
||||
2. Session 状态简化:只有 IDLE 和 WAITING 两种状态
|
||||
3. 独立的 Replyer:专属的提示词构建和 LLM 交互
|
||||
@@ -21,6 +27,7 @@ from .session import KokoroSession, SessionManager, get_session_manager
|
||||
from .chatter import KokoroFlowChatter
|
||||
from .planner import generate_plan
|
||||
from .replyer import generate_reply_text
|
||||
from .unified import generate_unified_response
|
||||
from .proactive_thinker import (
|
||||
ProactiveThinker,
|
||||
get_proactive_thinker,
|
||||
@@ -28,6 +35,7 @@ from .proactive_thinker import (
|
||||
stop_proactive_thinker,
|
||||
)
|
||||
from .config import (
|
||||
KFCMode,
|
||||
KokoroFlowChatterConfig,
|
||||
get_config,
|
||||
load_config,
|
||||
@@ -38,11 +46,11 @@ from src.plugin_system.base.plugin_metadata import PluginMetadata
|
||||
|
||||
__plugin_meta__ = PluginMetadata(
|
||||
name="Kokoro Flow Chatter",
|
||||
description="专为私聊设计的深度情感交互处理器,实现心理状态驱动的对话体验",
|
||||
usage="在私聊场景中自动启用,可通过 [kokoro_flow_chatter].enable 配置开关",
|
||||
version="2.0.0",
|
||||
description="专为私聊设计的深度情感交互处理器,支持统一/分离双模式",
|
||||
usage="在私聊场景中自动启用,可通过 [kokoro_flow_chatter].enable 和 .mode 配置",
|
||||
version="3.1.0",
|
||||
author="MoFox",
|
||||
keywords=["chatter", "kokoro", "private", "emotional", "narrative"],
|
||||
keywords=["chatter", "kokoro", "private", "emotional", "narrative", "dual-mode"],
|
||||
categories=["Chat", "AI", "Emotional"],
|
||||
extra={"is_built_in": True, "chat_type": "private"},
|
||||
)
|
||||
@@ -63,12 +71,14 @@ __all__ = [
|
||||
"KokoroFlowChatter",
|
||||
"generate_plan",
|
||||
"generate_reply_text",
|
||||
"generate_unified_response",
|
||||
# Proactive Thinker
|
||||
"ProactiveThinker",
|
||||
"get_proactive_thinker",
|
||||
"start_proactive_thinker",
|
||||
"stop_proactive_thinker",
|
||||
# Config
|
||||
"KFCMode",
|
||||
"KokoroFlowChatterConfig",
|
||||
"get_config",
|
||||
"load_config",
|
||||
|
||||
@@ -1,11 +1,19 @@
|
||||
"""
|
||||
Kokoro Flow Chatter - Chatter 主类
|
||||
|
||||
极简设计,只负责:
|
||||
1. 收到消息
|
||||
2. 调用 Planner 生成规划
|
||||
3. 执行动作(回复在 Action.execute() 中生成)
|
||||
4. 更新 Session
|
||||
支持两种工作模式:
|
||||
1. unified(统一模式): 单次 LLM 调用完成思考 + 回复生成
|
||||
2. split(分离模式): Planner + Replyer 两次 LLM 调用
|
||||
|
||||
核心设计:
|
||||
- Chatter 只负责 "收到消息 → 规划执行" 的流程
|
||||
- 无论 Session 之前是什么状态,流程都一样
|
||||
- 区别只体现在提示词中
|
||||
|
||||
不负责:
|
||||
- 等待超时处理(由 ProactiveThinker 负责)
|
||||
- 连续思考(由 ProactiveThinker 负责)
|
||||
- 主动发起对话(由 ProactiveThinker 负责)
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
@@ -18,8 +26,8 @@ from src.common.logger import get_logger
|
||||
from src.plugin_system.base.base_chatter import BaseChatter
|
||||
from src.plugin_system.base.component_types import ChatType
|
||||
|
||||
from .config import KFCMode, get_config
|
||||
from .models import SessionStatus
|
||||
from .planner import generate_plan
|
||||
from .session import get_session_manager
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -27,15 +35,15 @@ if TYPE_CHECKING:
|
||||
|
||||
logger = get_logger("kfc_chatter")
|
||||
|
||||
# 控制台颜色
|
||||
SOFT_PURPLE = "\033[38;5;183m"
|
||||
RESET = "\033[0m"
|
||||
|
||||
|
||||
class KokoroFlowChatter(BaseChatter):
|
||||
"""
|
||||
Kokoro Flow Chatter - 私聊特化的心流聊天器
|
||||
|
||||
支持两种工作模式(通过配置切换):
|
||||
- unified: 单次 LLM 调用完成思考和回复
|
||||
- split: Planner + Replyer 两次 LLM 调用
|
||||
|
||||
核心设计:
|
||||
- Chatter 只负责 "收到消息 → 规划执行" 的流程
|
||||
- 无论 Session 之前是什么状态,流程都一样
|
||||
@@ -62,18 +70,24 @@ class KokoroFlowChatter(BaseChatter):
|
||||
# 核心组件
|
||||
self.session_manager = get_session_manager()
|
||||
|
||||
# 加载配置
|
||||
self._config = get_config()
|
||||
self._mode = self._config.mode
|
||||
|
||||
# 并发控制
|
||||
self._lock = asyncio.Lock()
|
||||
self._processing = False
|
||||
|
||||
# 统计
|
||||
self._stats = {
|
||||
self._stats: dict[str, Any] = {
|
||||
"messages_processed": 0,
|
||||
"successful_responses": 0,
|
||||
"failed_responses": 0,
|
||||
}
|
||||
|
||||
logger.info(f"{SOFT_PURPLE}[KFC]{RESET} 初始化完成: stream_id={stream_id}")
|
||||
# 输出初始化信息
|
||||
mode_str = "统一模式" if self._mode == KFCMode.UNIFIED else "分离模式"
|
||||
logger.info(f"初始化完成 (模式: {mode_str}): stream_id={stream_id}")
|
||||
|
||||
async def execute(self, context: StreamContext) -> dict:
|
||||
"""
|
||||
@@ -84,7 +98,7 @@ class KokoroFlowChatter(BaseChatter):
|
||||
2. 获取未读消息
|
||||
3. 记录用户消息到 mental_log
|
||||
4. 确定 situation_type(根据之前的等待状态)
|
||||
5. 调用 Replyer 生成响应
|
||||
5. 根据模式调用对应的生成器
|
||||
6. 执行动作
|
||||
7. 更新 Session(记录 Bot 规划,设置等待状态)
|
||||
8. 保存 Session
|
||||
@@ -115,7 +129,6 @@ class KokoroFlowChatter(BaseChatter):
|
||||
situation_type = self._determine_situation_type(session)
|
||||
|
||||
# 5. **立即**结束等待状态,防止 ProactiveThinker 并发处理
|
||||
# 在调用 LLM 之前就结束等待,避免 ProactiveThinker 检测到超时后也开始处理
|
||||
if session.status == SessionStatus.WAITING:
|
||||
session.end_waiting()
|
||||
await self.session_manager.save_session(user_id)
|
||||
@@ -143,25 +156,26 @@ class KokoroFlowChatter(BaseChatter):
|
||||
# 8. 获取聊天流
|
||||
chat_stream = await self._get_chat_stream()
|
||||
|
||||
# 9. 调用 Planner 生成行动计划
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type=situation_type,
|
||||
chat_stream=chat_stream,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
# 9. 根据模式调用对应的生成器
|
||||
if self._mode == KFCMode.UNIFIED:
|
||||
plan_response = await self._execute_unified_mode(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type=situation_type,
|
||||
chat_stream=chat_stream,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
else:
|
||||
plan_response = await self._execute_split_mode(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
user_id=user_id,
|
||||
situation_type=situation_type,
|
||||
chat_stream=chat_stream,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
|
||||
# 10. 为 kfc_reply 动作注入必要的上下文信息
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
# 注入回复生成所需的上下文
|
||||
action.params["user_id"] = user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = situation_type
|
||||
|
||||
# 11. 执行动作(回复生成在 Action.execute() 中完成)
|
||||
# 10. 执行动作
|
||||
exec_results = []
|
||||
has_reply = False
|
||||
for action in plan_response.actions:
|
||||
@@ -178,7 +192,7 @@ class KokoroFlowChatter(BaseChatter):
|
||||
if result.get("success") and action.type in ("kfc_reply", "respond"):
|
||||
has_reply = True
|
||||
|
||||
# 12. 记录 Bot 规划到 mental_log
|
||||
# 11. 记录 Bot 规划到 mental_log
|
||||
session.add_bot_planning(
|
||||
thought=plan_response.thought,
|
||||
actions=[a.to_dict() for a in plan_response.actions],
|
||||
@@ -186,7 +200,7 @@ class KokoroFlowChatter(BaseChatter):
|
||||
max_wait_seconds=plan_response.max_wait_seconds,
|
||||
)
|
||||
|
||||
# 13. 更新 Session 状态
|
||||
# 12. 更新 Session 状态
|
||||
if plan_response.max_wait_seconds > 0:
|
||||
session.start_waiting(
|
||||
expected_reaction=plan_response.expected_reaction,
|
||||
@@ -195,20 +209,22 @@ class KokoroFlowChatter(BaseChatter):
|
||||
else:
|
||||
session.end_waiting()
|
||||
|
||||
# 14. 标记消息为已读
|
||||
# 13. 标记消息为已读
|
||||
for msg in unread_messages:
|
||||
context.mark_message_as_read(str(msg.message_id))
|
||||
|
||||
# 15. 保存 Session
|
||||
# 14. 保存 Session
|
||||
await self.session_manager.save_session(user_id)
|
||||
|
||||
# 16. 更新统计
|
||||
# 15. 更新统计
|
||||
self._stats["messages_processed"] += len(unread_messages)
|
||||
if has_reply:
|
||||
self._stats["successful_responses"] += 1
|
||||
|
||||
# 输出完成信息
|
||||
mode_str = "unified" if self._mode == KFCMode.UNIFIED else "split"
|
||||
logger.info(
|
||||
f"{SOFT_PURPLE}[KFC]{RESET} 处理完成: "
|
||||
f"处理完成 ({mode_str}): "
|
||||
f"user={user_name}, situation={situation_type}, "
|
||||
f"actions={[a.type for a in plan_response.actions]}, "
|
||||
f"wait={plan_response.max_wait_seconds}s"
|
||||
@@ -220,6 +236,7 @@ class KokoroFlowChatter(BaseChatter):
|
||||
has_reply=has_reply,
|
||||
thought=plan_response.thought,
|
||||
situation_type=situation_type,
|
||||
mode=mode_str,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
@@ -232,6 +249,68 @@ class KokoroFlowChatter(BaseChatter):
|
||||
finally:
|
||||
self._processing = False
|
||||
|
||||
async def _execute_unified_mode(
|
||||
self,
|
||||
session,
|
||||
user_name: str,
|
||||
situation_type: str,
|
||||
chat_stream,
|
||||
available_actions,
|
||||
):
|
||||
"""
|
||||
统一模式:单次 LLM 调用完成思考 + 回复生成
|
||||
|
||||
LLM 输出的 JSON 中 kfc_reply 动作已包含 content 字段,
|
||||
无需再调用 Replyer 生成回复。
|
||||
"""
|
||||
from .unified import generate_unified_response
|
||||
|
||||
plan_response = await generate_unified_response(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type=situation_type,
|
||||
chat_stream=chat_stream,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
|
||||
# 统一模式下 content 已经在 actions 中,无需注入
|
||||
return plan_response
|
||||
|
||||
async def _execute_split_mode(
|
||||
self,
|
||||
session,
|
||||
user_name: str,
|
||||
user_id: str,
|
||||
situation_type: str,
|
||||
chat_stream,
|
||||
available_actions,
|
||||
):
|
||||
"""
|
||||
分离模式:Planner + Replyer 两次 LLM 调用
|
||||
|
||||
1. Planner 生成行动计划(JSON,kfc_reply 不含 content)
|
||||
2. 为 kfc_reply 动作注入上下文,由 Action.execute() 调用 Replyer 生成回复
|
||||
"""
|
||||
from .planner import generate_plan
|
||||
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type=situation_type,
|
||||
chat_stream=chat_stream,
|
||||
available_actions=available_actions,
|
||||
)
|
||||
|
||||
# 为 kfc_reply 动作注入回复生成所需的上下文
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
action.params["user_id"] = user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = situation_type
|
||||
|
||||
return plan_response
|
||||
|
||||
def _determine_situation_type(self, session) -> str:
|
||||
"""
|
||||
确定当前情况类型
|
||||
@@ -282,9 +361,16 @@ class KokoroFlowChatter(BaseChatter):
|
||||
|
||||
def get_stats(self) -> dict[str, Any]:
|
||||
"""获取统计信息"""
|
||||
return self._stats.copy()
|
||||
stats = self._stats.copy()
|
||||
stats["mode"] = self._mode.value
|
||||
return stats
|
||||
|
||||
    @property
    def is_processing(self) -> bool:
        """Whether this chatter is currently handling messages (set around execute())."""
        return self._processing
|
||||
|
||||
    @property
    def mode(self) -> KFCMode:
        """The active working mode (unified or split), loaded from config at init."""
        return self._mode
|
||||
|
||||
@@ -2,12 +2,39 @@
|
||||
Kokoro Flow Chatter - 配置
|
||||
|
||||
可以通过 TOML 配置文件覆盖默认值
|
||||
|
||||
支持两种工作模式:
|
||||
- unified: 统一模式,单次 LLM 调用完成思考和回复生成(类似旧版架构)
|
||||
- split: 分离模式,Planner + Replyer 两次 LLM 调用(推荐,更精细的控制)
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
class KFCMode(str, Enum):
    """KFC working mode."""

    # Unified mode: one LLM call yields thought + reply (similar to the legacy architecture).
    UNIFIED = "unified"

    # Split mode: Planner builds the plan, Replyer writes the reply (recommended per config docs).
    SPLIT = "split"

    @classmethod
    def from_str(cls, value: str) -> "KFCMode":
        """Parse a mode from a string, case/whitespace-insensitively.

        Unknown values fall back to UNIFIED (the default mode).
        """
        normalized = value.strip().lower()
        try:
            # Value-based Enum lookup: KFCMode("unified") -> KFCMode.UNIFIED.
            return cls(normalized)
        except ValueError:
            return cls.UNIFIED
|
||||
|
||||
|
||||
@dataclass
|
||||
class WaitingDefaults:
|
||||
"""等待配置默认值"""
|
||||
@@ -47,12 +74,6 @@ class ProactiveConfig:
|
||||
# 关系门槛:最低好感度,达到此值才会主动关心
|
||||
min_affinity_for_proactive: float = 0.3
|
||||
|
||||
# 是否启用早安问候
|
||||
enable_morning_greeting: bool = True
|
||||
|
||||
# 是否启用晚安问候
|
||||
enable_night_greeting: bool = True
|
||||
|
||||
|
||||
@dataclass
|
||||
class PromptConfig:
|
||||
@@ -109,6 +130,11 @@ class KokoroFlowChatterConfig:
|
||||
# 是否启用
|
||||
enabled: bool = True
|
||||
|
||||
# 工作模式:unified(统一模式)或 split(分离模式)
|
||||
# - unified: 单次 LLM 调用完成思考和回复生成(类似旧版架构,更简洁)
|
||||
# - split: Planner + Replyer 两次 LLM 调用(更精细的控制,推荐)
|
||||
mode: KFCMode = KFCMode.UNIFIED
|
||||
|
||||
# 启用的消息源类型(空列表表示全部)
|
||||
enabled_stream_types: List[str] = field(default_factory=lambda: ["private"])
|
||||
|
||||
@@ -165,6 +191,10 @@ def load_config() -> KokoroFlowChatterConfig:
|
||||
if hasattr(kfc_cfg, 'debug'):
|
||||
config.debug = kfc_cfg.debug
|
||||
|
||||
# 工作模式配置
|
||||
if hasattr(kfc_cfg, 'mode'):
|
||||
config.mode = KFCMode.from_str(str(kfc_cfg.mode))
|
||||
|
||||
# 等待配置
|
||||
if hasattr(kfc_cfg, 'waiting'):
|
||||
wait_cfg = kfc_cfg.waiting
|
||||
@@ -188,8 +218,6 @@ def load_config() -> KokoroFlowChatterConfig:
|
||||
quiet_hours_end=getattr(pro_cfg, 'quiet_hours_end', "07:00"),
|
||||
trigger_probability=getattr(pro_cfg, 'trigger_probability', 0.3),
|
||||
min_affinity_for_proactive=getattr(pro_cfg, 'min_affinity_for_proactive', 0.3),
|
||||
enable_morning_greeting=getattr(pro_cfg, 'enable_morning_greeting', True),
|
||||
enable_night_greeting=getattr(pro_cfg, 'enable_night_greeting', True),
|
||||
)
|
||||
|
||||
# 提示词配置
|
||||
|
||||
@@ -102,11 +102,12 @@ def _parse_response(raw_response: str) -> LLMResponse:
|
||||
response = LLMResponse.from_dict(data)
|
||||
|
||||
if response.thought:
|
||||
logger.info(
|
||||
f"[KFC Planner] 解析成功: thought={response.thought[:50]}..., "
|
||||
f"actions={[a.type for a in response.actions]}"
|
||||
)
|
||||
# 使用 logger 输出美化日志(颜色通过 logger 系统配置)
|
||||
logger.info(f"💭 {response.thought}")
|
||||
|
||||
actions_str = ", ".join(a.type for a in response.actions)
|
||||
logger.debug(f"actions={actions_str}")
|
||||
else:
|
||||
logger.warning("[KFC Planner] 响应缺少 thought")
|
||||
logger.warning("响应缺少 thought")
|
||||
|
||||
return response
|
||||
|
||||
@@ -7,6 +7,10 @@ Kokoro Flow Chatter - 主动思考器
|
||||
3. 长期沉默后主动发起对话
|
||||
|
||||
通过 UnifiedScheduler 定期触发,与 Chatter 解耦
|
||||
|
||||
支持两种工作模式(与 Chatter 保持一致):
|
||||
- unified: 单次 LLM 调用完成思考和回复
|
||||
- split: Planner + Replyer 两次 LLM 调用
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
@@ -20,9 +24,8 @@ from src.common.logger import get_logger
|
||||
from src.config.config import global_config
|
||||
from src.plugin_system.apis.unified_scheduler import TriggerType, unified_scheduler
|
||||
|
||||
from .config import KFCMode, get_config
|
||||
from .models import EventType, SessionStatus
|
||||
from .planner import generate_plan
|
||||
from .replyer import _clean_reply_text
|
||||
from .session import KokoroSession, get_session_manager
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -44,6 +47,10 @@ class ProactiveThinker:
|
||||
- 定期检查所有 WAITING 状态的 Session
|
||||
- 触发连续思考或超时决策
|
||||
- 定期检查长期沉默的 Session,考虑主动发起
|
||||
|
||||
支持两种工作模式(与 Chatter 保持一致):
|
||||
- unified: 单次 LLM 调用
|
||||
- split: Planner + Replyer 两次调用
|
||||
"""
|
||||
|
||||
# 连续思考触发点(等待进度百分比)
|
||||
@@ -74,11 +81,12 @@ class ProactiveThinker:
|
||||
|
||||
def _load_config(self) -> None:
|
||||
"""加载配置 - 使用统一的配置系统"""
|
||||
from .config import get_config
|
||||
|
||||
config = get_config()
|
||||
proactive_cfg = config.proactive
|
||||
|
||||
# 工作模式
|
||||
self._mode = config.mode
|
||||
|
||||
# 等待检查间隔(秒)
|
||||
self.waiting_check_interval = 15.0
|
||||
# 主动思考检查间隔(秒)
|
||||
@@ -92,13 +100,11 @@ class ProactiveThinker:
|
||||
self.quiet_hours_end = proactive_cfg.quiet_hours_end
|
||||
self.trigger_probability = proactive_cfg.trigger_probability
|
||||
self.min_affinity_for_proactive = proactive_cfg.min_affinity_for_proactive
|
||||
self.enable_morning_greeting = proactive_cfg.enable_morning_greeting
|
||||
self.enable_night_greeting = proactive_cfg.enable_night_greeting
|
||||
|
||||
async def start(self) -> None:
|
||||
"""启动主动思考器"""
|
||||
if self._running:
|
||||
logger.warning("[ProactiveThinker] 已在运行中")
|
||||
logger.info("已在运行中")
|
||||
return
|
||||
|
||||
self._running = True
|
||||
@@ -324,6 +330,7 @@ class ProactiveThinker:
|
||||
return self._get_fallback_thought(elapsed_minutes, progress)
|
||||
|
||||
# 使用统一的文本清理函数
|
||||
from .replyer import _clean_reply_text
|
||||
thought = _clean_reply_text(raw_response)
|
||||
|
||||
logger.debug(f"[ProactiveThinker] LLM 生成等待想法 (model={model_name}): {thought[:50]}...")
|
||||
@@ -369,7 +376,7 @@ class ProactiveThinker:
|
||||
return None
|
||||
|
||||
async def _handle_timeout(self, session: KokoroSession) -> None:
|
||||
"""处理等待超时"""
|
||||
"""处理等待超时 - 支持双模式"""
|
||||
self._stats["timeout_decisions"] += 1
|
||||
|
||||
# 再次检查 Session 状态,防止在等待过程中被 Chatter 处理
|
||||
@@ -385,7 +392,7 @@ class ProactiveThinker:
|
||||
)
|
||||
return
|
||||
|
||||
logger.info(f"[ProactiveThinker] 等待超时: user={session.user_id}")
|
||||
logger.info(f"等待超时: user={session.user_id}")
|
||||
|
||||
try:
|
||||
# 获取用户名
|
||||
@@ -403,22 +410,35 @@ class ProactiveThinker:
|
||||
action_modifier = ActionModifier(action_manager, session.stream_id)
|
||||
await action_modifier.modify_actions(chatter_name="KokoroFlowChatter")
|
||||
|
||||
# 调用 Planner 生成超时决策
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="timeout",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
)
|
||||
# 根据模式选择生成方式
|
||||
if self._mode == KFCMode.UNIFIED:
|
||||
# 统一模式:单次 LLM 调用
|
||||
from .unified import generate_unified_response
|
||||
plan_response = await generate_unified_response(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="timeout",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
)
|
||||
else:
|
||||
# 分离模式:Planner + Replyer
|
||||
from .planner import generate_plan
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="timeout",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
)
|
||||
|
||||
# 为 kfc_reply 动作注入必要的上下文信息
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
action.params["user_id"] = session.user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = "timeout"
|
||||
# 分离模式下需要注入上下文信息
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
action.params["user_id"] = session.user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = "timeout"
|
||||
|
||||
# 执行动作(回复生成在 Action.execute() 中完成)
|
||||
for action in plan_response.actions:
|
||||
@@ -539,7 +559,7 @@ class ProactiveThinker:
|
||||
session: KokoroSession,
|
||||
trigger_reason: str,
|
||||
) -> None:
|
||||
"""处理主动思考"""
|
||||
"""处理主动思考 - 支持双模式"""
|
||||
self._stats["proactive_triggered"] += 1
|
||||
|
||||
# 再次检查最近活动时间,防止与 Chatter 并发
|
||||
@@ -550,7 +570,7 @@ class ProactiveThinker:
|
||||
)
|
||||
return
|
||||
|
||||
logger.info(f"[ProactiveThinker] 主动思考触发: user={session.user_id}, reason={trigger_reason}")
|
||||
logger.info(f"主动思考触发: user={session.user_id}, reason={trigger_reason}")
|
||||
|
||||
try:
|
||||
# 获取用户名
|
||||
@@ -580,15 +600,29 @@ class ProactiveThinker:
|
||||
"silence_duration": silence_duration,
|
||||
}
|
||||
|
||||
# 调用 Planner
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="proactive",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
extra_context=extra_context,
|
||||
)
|
||||
# 根据模式选择生成方式
|
||||
if self._mode == KFCMode.UNIFIED:
|
||||
# 统一模式:单次 LLM 调用
|
||||
from .unified import generate_unified_response
|
||||
plan_response = await generate_unified_response(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="proactive",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
extra_context=extra_context,
|
||||
)
|
||||
else:
|
||||
# 分离模式:Planner + Replyer
|
||||
from .planner import generate_plan
|
||||
plan_response = await generate_plan(
|
||||
session=session,
|
||||
user_name=user_name,
|
||||
situation_type="proactive",
|
||||
chat_stream=chat_stream,
|
||||
available_actions=action_manager.get_using_actions(),
|
||||
extra_context=extra_context,
|
||||
)
|
||||
|
||||
# 检查是否决定不打扰
|
||||
is_do_nothing = (
|
||||
@@ -597,19 +631,20 @@ class ProactiveThinker:
|
||||
)
|
||||
|
||||
if is_do_nothing:
|
||||
logger.info(f"[ProactiveThinker] 决定不打扰: user={session.user_id}")
|
||||
logger.info(f"决定不打扰: user={session.user_id}")
|
||||
session.last_proactive_at = time.time()
|
||||
await self.session_manager.save_session(session.user_id)
|
||||
return
|
||||
|
||||
# 为 kfc_reply 动作注入必要的上下文信息
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
action.params["user_id"] = session.user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = "proactive"
|
||||
action.params["extra_context"] = extra_context
|
||||
# 分离模式下需要注入上下文信息
|
||||
if self._mode == KFCMode.SPLIT:
|
||||
for action in plan_response.actions:
|
||||
if action.type == "kfc_reply":
|
||||
action.params["user_id"] = session.user_id
|
||||
action.params["user_name"] = user_name
|
||||
action.params["thought"] = plan_response.thought
|
||||
action.params["situation_type"] = "proactive"
|
||||
action.params["extra_context"] = extra_context
|
||||
|
||||
# 执行动作(回复生成在 Action.execute() 中完成)
|
||||
for action in plan_response.actions:
|
||||
|
||||
@@ -827,6 +827,202 @@ class PromptBuilder:
|
||||
target_message=target_message or "(无消息内容)",
|
||||
)
|
||||
|
||||
    async def build_unified_prompt(
        self,
        session: KokoroSession,
        user_name: str,
        situation_type: str = "new_message",
        chat_stream: Optional["ChatStream"] = None,
        available_actions: Optional[dict] = None,
        extra_context: Optional[dict] = None,
    ) -> str:
        """
        Build the unified-mode prompt (one LLM call for thought + reply).

        Differences from the planner prompt:
        - Uses the full output format (the ``content`` field must be filled in)
        - Does not use the separate replyer prompt

        Args:
            session: Session object.
            user_name: Display name of the user.
            situation_type: Situation type (e.g. "new_message").
            chat_stream: Chat-stream object.
            available_actions: Mapping of available actions.
            extra_context: Extra context values.

        Returns:
            The complete unified-mode prompt string.
        """
        extra_context = extra_context or {}

        # Resolve the user id (session may be None).
        user_id = session.user_id if session else None

        # 1. Persona block.
        persona_block = self._build_persona_block()

        # 2. Relationship, memory, expression habits etc. via the context builder.
        context_data = await self._build_context_data(user_name, chat_stream, user_id)
        relation_block = context_data.get("relation_info", f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。")
        memory_block = context_data.get("memory_block", "")
        expression_habits = self._build_combined_expression_block(context_data.get("expression_habits", ""))

        # 3. Activity stream.
        activity_stream = await self._build_activity_stream(session, user_name)

        # 4. Current situation description.
        current_situation = await self._build_current_situation(
            session, user_name, situation_type, extra_context
        )

        # 5. Chat-history overview.
        chat_history_block = await self._build_chat_history_block(chat_stream)

        # 6. Available actions (unified mode stresses that `content` is required).
        actions_block = self._build_unified_actions_block(available_actions)

        # 7. Unified-mode output format (requires `content` to be filled in).
        output_format = await self._get_unified_output_format()

        # 8. Format through the shared prompt management system.
        prompt = await global_prompt_manager.format_prompt(
            PROMPT_NAMES["main"],
            user_name=user_name,
            persona_block=persona_block,
            relation_block=relation_block,
            memory_block=memory_block or "(暂无相关记忆)",
            expression_habits=expression_habits or "(根据自然对话风格回复即可)",
            activity_stream=activity_stream or "(这是你们第一次聊天)",
            current_situation=current_situation,
            chat_history_block=chat_history_block,
            available_actions=actions_block,
            output_format=output_format,
        )

        return prompt
|
||||
|
||||
def _build_unified_actions_block(self, available_actions: Optional[dict]) -> str:
|
||||
"""
|
||||
构建统一模式的可用动作块
|
||||
|
||||
与 _build_actions_block 的区别:
|
||||
- 强调 kfc_reply 需要填写 content 字段
|
||||
"""
|
||||
if not available_actions:
|
||||
return self._get_unified_default_actions_block()
|
||||
|
||||
action_blocks = []
|
||||
for action_name, action_info in available_actions.items():
|
||||
block = self._format_unified_action(action_name, action_info)
|
||||
if block:
|
||||
action_blocks.append(block)
|
||||
|
||||
return "\n".join(action_blocks) if action_blocks else self._get_unified_default_actions_block()
|
||||
|
||||
    def _format_unified_action(self, action_name: str, action_info) -> str:
        """Format a single action card for the unified-mode prompt.

        For ``kfc_reply`` the JSON example pins a mandatory ``content``
        field; every other action gets an example built from its declared
        parameters.
        """
        description = getattr(action_info, "description", "") or f"执行 {action_name}"
        action_require = getattr(action_info, "action_require", []) or []
        require_text = "\n".join(f"  - {req}" for req in action_require) if action_require else "  - 根据情况使用"

        # Unified mode requires kfc_reply to carry the reply text itself.
        if action_name == "kfc_reply":
            return f"""### {action_name}
**描述**: {description}

**使用场景**:
{require_text}

**示例**:
```json
{{
    "type": "{action_name}",
    "content": "你要说的话(必填)"
}}
```
"""
        else:
            action_parameters = getattr(action_info, "action_parameters", {}) or {}
            params_example = self._build_params_example(action_parameters)

            return f"""### {action_name}
**描述**: {description}

**使用场景**:
{require_text}

**示例**:
```json
{{
    "type": "{action_name}",
    {params_example}
}}
```
"""
|
||||
|
||||
    def _get_unified_default_actions_block(self) -> str:
        """Return the default unified-mode action list (kfc_reply + do_nothing),
        used when no available actions were supplied."""
        return """### kfc_reply
**描述**: 发送回复消息

**使用场景**:
- 需要回复对方消息时使用

**示例**:
```json
{
    "type": "kfc_reply",
    "content": "你要说的话(必填)"
}
```


### do_nothing
**描述**: 什么都不做

**使用场景**:
- 当前不需要回应时使用

**示例**:
```json
{
    "type": "do_nothing"
}
```"""
|
||||
|
||||
    async def _get_unified_output_format(self) -> str:
        """Fetch the unified-mode output-format template from the prompt
        registry, falling back to a built-in default when it is not
        registered (KeyError)."""
        try:
            prompt = await global_prompt_manager.get_prompt_async(
                PROMPT_NAMES["unified_output_format"]
            )
            return prompt.template
        except KeyError:
            # Template not registered — return the built-in default format.
            return """请用以下 JSON 格式回复:
```json
{
    "thought": "你脑子里在想什么,越自然越好",
    "actions": [
        {"type": "kfc_reply", "content": "你的回复内容"}
    ],
    "expected_reaction": "你期待对方的反应是什么",
    "max_wait_seconds": 等待时间(秒),0 表示不等待
}
```

### 字段说明
- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
- `actions`:你要执行的动作列表。对于 `kfc_reply` 动作,**必须**填写 `content` 字段,写上你要说的话。
- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
- `max_wait_seconds`:设定等待时间(秒),0 表示不等待,超时后你会考虑是否要主动说点什么

### 注意事项
- kfc_reply 的 content 字段是必填的,直接写你要发送的消息内容
- 即使什么都不想做,也放一个 `{"type": "do_nothing"}`
- 可以组合多个动作,比如先发消息再发表情"""
|
||||
|
||||
|
||||
# 全局单例
|
||||
_prompt_builder: Optional[PromptBuilder] = None
|
||||
|
||||
@@ -361,11 +361,42 @@ kfc_WAITING_THOUGHT = Prompt(
|
||||
现在,请直接输出你等待时的内心想法:""",
|
||||
)
|
||||
|
||||
# =================================================================================================
# Unified-mode output format (single LLM call; `content` must be filled in)
# =================================================================================================

# NOTE(review): braces are doubled ({{ }}) — presumably because the template
# goes through str.format-style substitution in the prompt manager; confirm.
kfc_UNIFIED_OUTPUT_FORMAT = Prompt(
    name="kfc_unified_output_format",
    template="""请用以下 JSON 格式回复:
```json
{{
    "thought": "你脑子里在想什么,越自然越好",
    "actions": [
        {{"type": "kfc_reply", "content": "你的回复内容"}}
    ],
    "expected_reaction": "你期待对方的反应是什么",
    "max_wait_seconds": 等待时间(秒),0 表示不等待
}}
```

### 字段说明
- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
- `actions`:你要执行的动作列表。对于 `kfc_reply` 动作,**必须**填写 `content` 字段,写上你要说的话。
- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
- `max_wait_seconds`:设定等待时间(秒),0 表示不等待,超时后你会考虑是否要主动说点什么。如果你认为聊天没有继续的必要,或不想打扰对方,可以设为 0。

### 注意事项
- kfc_reply 的 content 字段是**必填**的,直接写你要发送的消息内容
- 即使什么都不想做,也放一个 `{{"type": "do_nothing"}}`
- 可以组合多个动作,比如先发消息再发表情""",
)
|
||||
|
||||
# 导出所有模板名称,方便外部引用
|
||||
PROMPT_NAMES = {
|
||||
"main": "kfc_main",
|
||||
"output_format": "kfc_output_format",
|
||||
"planner_output_format": "kfc_planner_output_format",
|
||||
"unified_output_format": "kfc_unified_output_format",
|
||||
"replyer": "kfc_replyer",
|
||||
"replyer_context_normal": "kfc_replyer_context_normal",
|
||||
"replyer_context_in_time": "kfc_replyer_context_in_time",
|
||||
|
||||
@@ -0,0 +1,607 @@
|
||||
"""
|
||||
Kokoro Flow Chatter - 统一模式提示词模块
|
||||
|
||||
为统一模式(Unified Mode)提供模块化的提示词构建:
|
||||
1. 核心身份模块 - 人设/人格/背景
|
||||
2. 行为准则模块 - 规则/边界
|
||||
3. 情境上下文模块 - 时间/场景/内在状态/关系/记忆
|
||||
4. 动作能力模块 - 可用动作的描述
|
||||
5. 输出格式模块 - 表达风格 + JSON格式
|
||||
|
||||
设计理念:
|
||||
- 每个模块只负责自己的部分,互不干扰
|
||||
- 回复相关内容(人设、上下文)与动作定义分离
|
||||
- 方便独立调试和优化每个部分
|
||||
|
||||
注意:此模块仅用于统一模式,分离模式使用 prompt/builder.py
|
||||
"""
|
||||
|
||||
import time
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from src.common.logger import get_logger
|
||||
from src.config.config import global_config
|
||||
from src.plugin_system.base.component_types import ActionInfo
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from src.chat.message_receive.chat_stream import ChatStream
|
||||
from src.common.data_models.message_manager_data_model import StreamContext
|
||||
|
||||
from .models import MentalLogEntry, EventType
|
||||
from .session import KokoroSession
|
||||
|
||||
logger = get_logger("kfc_prompt_unified")
|
||||
|
||||
|
||||
# ============================================================
|
||||
# 模块1: 核心身份 - 人设/人格/背景
|
||||
# ============================================================
|
||||
|
||||
def build_identity_module() -> str:
    """Build the core-identity module: persona core/side, identity, background story.

    These pieces are the foundation of every reply — they define "who you are".

    Returns:
        The assembled identity text.

    Raises:
        RuntimeError: if the global configuration has not been initialised yet.
    """
    if global_config is None:
        raise RuntimeError("global_config 未初始化")

    cfg = global_config.personality

    # Core persona always present; fall back to a warm default.
    sections = [cfg.personality_core or "你是一个温暖、真诚的对话伙伴。"]

    # Optional persona facets, each rendered on its own paragraph.
    for facet in (cfg.personality_side or "", cfg.identity or ""):
        if facet:
            sections.append(f"\n{facet}")

    story = cfg.background_story or ""
    if story:
        sections.append(f"\n### 背景故事\n{story}")

    return "\n".join(sections)
||||
|
||||
|
||||
# ============================================================
|
||||
# 模块2: 行为准则 - 规则/边界/KFC特有准则
|
||||
# ============================================================
|
||||
|
||||
def build_rules_module() -> str:
    """Build the behaviour-rules module: safety guidelines plus KFC house rules.

    Defines the boundaries of behaviour — "what may and may not be done".

    Returns:
        Safety guidelines followed by the KFC-specific conduct rules.

    Raises:
        RuntimeError: if the global configuration has not been initialised yet.
    """
    if global_config is None:
        raise RuntimeError("global_config 未初始化")

    guidelines = global_config.personality.safety_guidelines or []
    # A list is rendered as markdown bullets; anything else is stringified.
    if isinstance(guidelines, list):
        safety_text = "\n".join(f"- {item}" for item in guidelines)
    else:
        safety_text = str(guidelines)

    # KFC-specific conduct rules (runtime prompt text, kept verbatim).
    kfc_rules = """- 说话要自然,像平时发微信一样
- 有自己的边界感,不用无条件迎合
- 称呼对方用自然的昵称,别念复杂的用户名
- 不要模仿聊天记录里的系统格式(比如"[表情包:xxx]"这种是系统标记,要发送表情包请使用emoji动作)"""

    return f"""{safety_text}

{kfc_rules}"""
||||
|
||||
|
||||
# ============================================================
|
||||
# 模块3: 情境上下文 - 时间/场景/内在状态/关系/记忆
|
||||
# ============================================================
|
||||
|
||||
def build_context_module(
    session: "KokoroSession",
    chat_stream: Optional["ChatStream"] = None,
    context_data: Optional[dict[str, str]] = None,
) -> str:
    """Build the situational-context module.

    Covers: current time, chat scene (group vs. private), inner state (mood),
    relationship info and memories — i.e. "what is going on right now".

    Args:
        session: Current session. NOTE(review): currently unread in this body —
            presumably kept for interface symmetry with the other builders; confirm.
        chat_stream: Chat stream, used only to tell group chat from private chat.
        context_data: S4U context data (schedule / mood / relation_info / memory_block).

    Returns:
        The assembled context text.
    """
    context_data = context_data or {}

    # Time and scene
    current_time = datetime.now().strftime("%Y年%m月%d日 %H:%M:%S")
    is_group_chat = bool(chat_stream and chat_stream.group_info)
    chat_scene = "你在群里聊天" if is_group_chat else "你在和对方私聊"

    # Schedule block (if the caller provided one)
    schedule_block = context_data.get("schedule", "")

    # Inner state (defaults to calm when absent from context_data)
    mood = context_data.get("mood", "平静")

    # Relationship info
    relation_info = context_data.get("relation_info", "")

    # Memory
    memory_block = context_data.get("memory_block", "")

    parts = []

    # Time and scene
    parts.append(f"**时间**: {current_time}")
    parts.append(f"**场景**: {chat_scene}")

    # Schedule block
    if schedule_block:
        parts.append(f"{schedule_block}")

    # Inner state
    parts.append(f"\n你现在的心情:{mood}")

    # Relationship info.
    # NOTE(review): the "## 4." heading emitted here must stay in sync with the
    # section numbering assembled by build_system_prompt — confirm before renumbering.
    if relation_info:
        parts.append(f"\n## 4. 你和对方的关系\n{relation_info}")

    # Memory
    if memory_block:
        parts.append(f"\n{memory_block}")

    return "\n".join(parts)
||||
|
||||
|
||||
# ============================================================
|
||||
# 模块4: 动作能力 - 可用动作的描述
|
||||
# ============================================================
|
||||
|
||||
def build_actions_module(available_actions: Optional[dict[str, "ActionInfo"]] = None) -> str:
    """Build the action-capability module describing every available action.

    Kept separate from reply content — this only describes "what can be done":
    description, parameters, usage scenarios and a JSON example per action.

    Args:
        available_actions: Mapping of action name -> ActionInfo. When empty or
            None, the built-in default action block is returned instead.

    Returns:
        Markdown-formatted text, one section per action, separated by blank lines.
    """
    if not available_actions:
        return _get_default_actions_block()

    action_blocks = []

    for action_name, action_info in available_actions.items():
        description = action_info.description or f"执行 {action_name}"

        # Section header for this action
        action_block = f"### `{action_name}` - {description}"

        # Parameter list (if any). Join outside the f-string: backslashes in
        # f-string expressions are only legal from Python 3.12, and this also
        # replaces the opaque chr(10) workaround used previously.
        if action_info.action_parameters:
            params_text = "\n".join(
                f" - `{name}`: {desc}" for name, desc in action_info.action_parameters.items()
            )
            action_block += f"\n参数:\n{params_text}"

        # Usage scenarios (if any)
        if action_info.action_require:
            require_text = "\n".join(f" - {req}" for req in action_info.action_require)
            action_block += f"\n使用场景:\n{require_text}"

        # JSON example with placeholder parameter values
        example_params = ""
        if action_info.action_parameters:
            param_examples = [f'"{name}": "..."' for name in action_info.action_parameters]
            example_params = ", " + ", ".join(param_examples)

        action_block += f'\n```json\n{{"type": "{action_name}"{example_params}}}\n```'

        action_blocks.append(action_block)

    return "\n\n".join(action_blocks)
||||
|
||||
|
||||
def _get_default_actions_block() -> str:
    """Return the built-in default action description block.

    Used by build_actions_module() when no actions are registered. Documents
    the four built-in actions (kfc_reply / poke_user / update_internal_state /
    do_nothing) as runtime prompt text sent to the LLM verbatim.
    """
    return """### `kfc_reply` - 发消息
发送文字回复。

**重要**:像真人发微信一样,把你想说的话拆成几条短消息,每条用一个 kfc_reply 动作。

什么时候分?
- 说完一句话后想补充一句 → 分
- 语气转折了 → 分
- 想让对方先看到前面的,再看到后面的 → 分
- 一个意思说完了 → 分

不需要分的时候:
- 就一句话的事 → 一条就够
- 话很短,不用拆 → 一条就够

示例:
```json
{
"actions": [
{"type": "kfc_reply", "content": "嗯嗯~"},
{"type": "kfc_reply", "content": "我知道了"}
]
}
```

### `poke_user` - 戳一戳
戳对方一下
```json
{"type": "poke_user"}
```

### `update_internal_state` - 更新你的心情
更新你现在的心情状态
```json
{"type": "update_internal_state", "mood": "开心"}
```

### `do_nothing` - 不做任何事
想了想,决定现在不作回应
```json
{"type": "do_nothing"}
```"""
||||
|
||||
|
||||
# ============================================================
|
||||
# 模块5: 表达与输出格式 - 回复风格 + JSON格式
|
||||
# ============================================================
|
||||
|
||||
def build_output_module(context_data: Optional[dict[str, str]] = None) -> str:
    """Build the expression & output-format module.

    Combines the configured reply style, learned expression habits and the
    JSON output-format instructions — i.e. "how to speak" and what shape the
    answer must take.

    Args:
        context_data: S4U context data; ``expression_habits`` is read if present.

    Returns:
        The assembled output-format text.

    Raises:
        RuntimeError: if the global configuration has not been initialised yet.
    """
    if global_config is None:
        raise RuntimeError("global_config 未初始化")

    context_data = context_data or {}

    reply_style = global_config.personality.reply_style or ""
    expression_habits = context_data.get("expression_habits", "")

    # JSON output-format instructions (runtime prompt text, kept verbatim).
    json_format = """### 输出格式(JSON)
```json
{
"thought": "你心里的真实想法,像日记一样自然",
"expected_user_reaction": "猜猜对方看到会怎么想",
"max_wait_seconds": 等多久(60-900),不等就填0,
"actions": [
{"type": "kfc_reply", "content": "你要说的话"}
]
}
```

关于 thought(内心想法):
- 写你真正在想的,不是在分析任务
- 像心里嘀咕一样,比如"这家伙又来撒娇了~" "有点困了但还想再聊会儿"
- 不要写"根据设定""我需要""我应该"这种规划性的话
- 就是你作为这个人,此刻心里在想什么

关于 expected_user_reaction(预期反应):
- 随便猜一下对方可能的反应就行
- 比如"可能会撒娇""估计要睡了""应该会觉得好笑"
- 不用分析得很正式

不想做任何事就用 `{"type": "do_nothing"}`"""

    # Optional style sections first, then the mandatory JSON format block.
    segments = [text for text in (reply_style, expression_habits) if text]
    segments.append(json_format)
    return "\n\n".join(segments)
||||
|
||||
|
||||
# ============================================================
|
||||
# 组装完整的 System Prompt(复刻旧版)
|
||||
# ============================================================
|
||||
|
||||
def build_system_prompt(
    session: "KokoroSession",
    available_actions: Optional[dict[str, "ActionInfo"]] = None,
    context_data: Optional[dict[str, str]] = None,
    chat_stream: Optional["ChatStream"] = None,
) -> str:
    """Assemble the complete system prompt from the modules, in order.

    Order: role frame, identity, rules, situational context, action
    capabilities, output format.

    Args:
        session: Current session (forwarded to the context module).
        available_actions: Available action mapping (forwarded to the actions module).
        context_data: S4U context data (forwarded to context and output modules).
        chat_stream: Chat stream (group/private detection).

    Returns:
        The full system prompt text.
    """
    # Role frame: jump straight into character, never mention being an AI.
    role_frame = "你是一个真实的人,有自己的生活、想法和情绪。像平时和朋友聊天一样自然地交流。"

    # Assemble the modules.
    # NOTE(review): heading numbers jump from "## 3" to "## 5" — section 4
    # ("## 4. 你和对方的关系") is emitted conditionally inside
    # build_context_module; confirm the numbering is intentional before changing.
    modules = [
        role_frame,
        "",
        "## 1. 你是谁",
        build_identity_module(),
        "",
        "## 2. 规则",
        build_rules_module(),
        "",
        "## 3. 现在的情况",
        build_context_module(session, chat_stream, context_data),
        "",
        "## 5. 你能做的事",
        build_actions_module(available_actions),
        "",
        "## 6. 怎么回复",
        build_output_module(context_data),
    ]

    return "\n".join(modules)
||||
|
||||
|
||||
# ============================================================
|
||||
# User Prompt 模板(复刻旧版)
|
||||
# ============================================================
|
||||
|
||||
# User-prompt template for the "responding to new message(s)" scenario.
RESPONDING_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}

## 新消息
{incoming_messages}

---
看完这些消息,你想怎么回应?用 JSON 输出你的想法和决策。"""


# User-prompt template for the "waited too long with no reply" decision.
TIMEOUT_DECISION_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}

## 现在的情况
你发了消息,等了 {wait_duration_seconds:.0f} 秒({wait_duration_minutes:.1f} 分钟),对方还没回。
你之前觉得对方可能会:{expected_user_reaction}

{followup_warning}

你发的最后一条:{last_bot_message}

---
你拿起手机看了一眼,发现对方还没回复。你想怎么办?

选项:
1. **继续等** - 用 `do_nothing`,设个 `max_wait_seconds` 等一会儿再看
2. **发消息** - 用 `reply`,不过别太频繁追问
3. **算了不等了** - 用 `do_nothing`,`max_wait_seconds` 设为 0

用 JSON 输出你的想法和决策。"""


# User-prompt template for proactively reaching out after a long silence.
PROACTIVE_THINKING_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}

## 现在的情况
现在是 {current_time},距离你们上次聊天已经过了 {silence_duration}。
{relation_block}

{trigger_context}

---
你突然想起了对方。要不要联系一下?

说实话,不联系也完全没问题——不打扰也是一种温柔。
如果决定联系,想好说什么,要自然一点。

用 JSON 输出你的想法和决策。不想发消息就用 `do_nothing`。"""
||||
|
||||
|
||||
# ============================================================
|
||||
# 格式化历史记录
|
||||
# ============================================================
|
||||
|
||||
def format_narrative_history(
    mental_log: list["MentalLogEntry"],
    max_entries: int = 15,
) -> str:
    """Render the mental-activity log as narrative history text.

    Each entry becomes one or more timestamped lines: user messages, the bot's
    inner thoughts, replies it sent, and waiting-time musings.

    Args:
        mental_log: Mental-activity log entries, oldest first.
        max_entries: Maximum number of most-recent entries to include.

    Returns:
        str: The narrative text, or a placeholder when the history is empty.
    """
    if not mental_log:
        return "(这是对话的开始,还没有历史记录)"

    # Only the most recent entries are rendered.
    recent_entries = mental_log[-max_entries:]

    narrative_parts = []
    for entry in recent_entries:
        timestamp_str = time.strftime(
            "%Y-%m-%d %H:%M:%S",
            time.localtime(entry.timestamp)
        )

        if entry.event_type == EventType.USER_MESSAGE:
            user_name = entry.user_name or "用户"
            narrative_parts.append(
                f"[{timestamp_str}] {user_name}说:{entry.content}"
            )
        elif entry.event_type == EventType.BOT_PLANNING:
            # Inner monologue first, then any replies that were sent.
            if entry.thought:
                narrative_parts.append(
                    f"[{timestamp_str}] (你的内心:{entry.thought})"
                )
            # Render actions: both legacy "reply" and current "kfc_reply" count.
            for action in entry.actions:
                action_type = action.get("type", "")
                if action_type == "kfc_reply" or action_type == "reply":
                    content = action.get("content", "")
                    if content:
                        narrative_parts.append(
                            f"[{timestamp_str}] 你回复:{content}"
                        )
        elif entry.event_type == EventType.WAITING_UPDATE:
            if entry.waiting_thought:
                narrative_parts.append(
                    f"[{timestamp_str}] (等待中的想法:{entry.waiting_thought})"
                )

    return "\n".join(narrative_parts) if narrative_parts else "(这是对话的开始,还没有历史记录)"
||||
|
||||
|
||||
def format_history_from_context(
    context: "StreamContext",
    mental_log: Optional[list["MentalLogEntry"]] = None,
) -> str:
    """Build narrative history from the StreamContext's stored messages.

    This is the key to "seamless integration":
    - reads history from the same database the AFC system uses
    - honours the global [chat].max_context_size setting
    - renders the messages in KFC's narrative style

    Args:
        context: Chat-stream context holding the shared message history.
        mental_log: Optional mental-activity log; its inner monologues are
            appended after the message history.

    Returns:
        str: The narrative text, or a placeholder when there is nothing to show.
    """
    # Read history from the StreamContext, honouring the global context-size limit.
    max_context = 25  # default when the config is unavailable
    if global_config and hasattr(global_config, 'chat') and global_config.chat:
        max_context = getattr(global_config.chat, "max_context_size", 25)
    history_messages = context.get_messages(limit=max_context, include_unread=False)

    if not history_messages and not mental_log:
        return "(这是对话的开始,还没有历史记录)"

    # Bot's own user id, used to attribute each message to "you" vs. the user.
    bot_user_id = None
    if global_config and hasattr(global_config, 'bot') and global_config.bot:
        bot_user_id = str(getattr(global_config.bot, 'qq_account', ''))

    narrative_parts = []

    # First: render the stored message history in narrative form.
    for msg in history_messages:
        timestamp_str = time.strftime(
            "%Y-%m-%d %H:%M:%S",
            time.localtime(msg.time or time.time())
        )

        # Attribute the message: bot's own line vs. a user's line.
        msg_user_id = str(msg.user_info.user_id) if msg.user_info else ""
        is_bot_message = bot_user_id and msg_user_id == bot_user_id
        content = msg.processed_plain_text or msg.display_message or ""

        if is_bot_message:
            narrative_parts.append(f"[{timestamp_str}] 你回复:{content}")
        else:
            sender_name = msg.user_info.user_nickname if msg.user_info else "用户"
            narrative_parts.append(f"[{timestamp_str}] {sender_name}说:{content}")

    # Then: append recent inner monologues from the mental log (if provided).
    # NOTE(review): these are appended after the history rather than interleaved
    # chronologically — confirm this ordering is intentional.
    if mental_log:
        for entry in mental_log[-5:]:  # only the 5 most recent thoughts
            timestamp_str = time.strftime(
                "%Y-%m-%d %H:%M:%S",
                time.localtime(entry.timestamp)
            )

            if entry.event_type == EventType.BOT_PLANNING and entry.thought:
                narrative_parts.append(f"[{timestamp_str}] (你的内心:{entry.thought})")

    return "\n".join(narrative_parts) if narrative_parts else "(这是对话的开始,还没有历史记录)"
||||
|
||||
|
||||
def format_incoming_messages(
    message_content: str,
    sender_name: str,
    sender_id: str,
    message_time: Optional[float] = None,
    all_unread_messages: Optional[list] = None,
) -> str:
    """Format the incoming message(s) for the user prompt.

    Supports a single message (legacy call sites) as well as a batch of unread
    messages (interrupt/merge scenario).

    Args:
        message_content: Content of the primary message.
        sender_name: Display name of the sender.
        sender_id: Sender's user id.
        message_time: Unix timestamp of the message; defaults to "now".
        all_unread_messages: Every unread message when more than one exists.

    Returns:
        str: Formatted text ready to embed into the prompt.
    """
    if message_time is None:
        message_time = time.time()

    # Batch form: render the unread group as a numbered list.
    if all_unread_messages and len(all_unread_messages) > 1:
        lines = [f"**用户连续发送了 {len(all_unread_messages)} 条消息:**\n"]

        for index, msg in enumerate(all_unread_messages, 1):
            stamp = time.strftime(
                "%Y-%m-%d %H:%M:%S", time.localtime(msg.time or time.time())
            )
            who = msg.user_info.user_nickname if msg.user_info else sender_name
            body = msg.processed_plain_text or msg.display_message or ""

            lines.extend(
                [
                    f"[{index}] 来自:{who}",
                    f" 时间:{stamp}",
                    f" 内容:{body}",
                    "",
                ]
            )

        lines.append("**提示**:请综合理解这些消息的整体意图,不需要逐条回复。")
        return "\n".join(lines)

    # Single-message form (legacy format).
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(message_time))
    return f"""来自:{sender_name}(用户ID: {sender_id})
时间:{stamp}
内容:{message_content}"""
||||
@@ -82,7 +82,8 @@ async def generate_reply_text(
|
||||
# 3. 清理并返回回复文本
|
||||
reply_text = _clean_reply_text(raw_response)
|
||||
|
||||
logger.info(f"[KFC Replyer] 生成成功 (model={model_name}): {reply_text[:50]}...")
|
||||
# 使用 logger 输出美化日志(颜色通过 logger 系统配置)
|
||||
logger.info(f"💬 {reply_text}")
|
||||
|
||||
return True, reply_text
|
||||
|
||||
|
||||
578
src/plugins/built_in/kokoro_flow_chatter/unified.py
Normal file
578
src/plugins/built_in/kokoro_flow_chatter/unified.py
Normal file
@@ -0,0 +1,578 @@
|
||||
"""
|
||||
Kokoro Flow Chatter - 统一模式
|
||||
|
||||
统一模式(Unified Mode):
|
||||
- 使用模块化的提示词组件构建提示词
|
||||
- System Prompt + User Prompt 的标准结构
|
||||
- 一次 LLM 调用完成思考 + 回复生成
|
||||
- 输出 JSON 格式:thought + actions + max_wait_seconds
|
||||
|
||||
与分离模式(Split Mode)的区别:
|
||||
- 统一模式:一次调用完成所有工作,actions 中直接包含回复内容
|
||||
- 分离模式:Planner + Replyer 两次调用,先规划再生成回复
|
||||
"""
|
||||
|
||||
import time
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from src.common.logger import get_logger
|
||||
from src.config.config import global_config
|
||||
from src.plugin_system.apis import llm_api
|
||||
from src.utils.json_parser import extract_and_parse_json
|
||||
|
||||
from .models import LLMResponse, EventType
|
||||
from .session import KokoroSession
|
||||
|
||||
# 统一模式专用的提示词模块
|
||||
from . import prompt_modules_unified as prompt_modules
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from src.chat.message_receive.chat_stream import ChatStream
|
||||
from src.common.data_models.message_manager_data_model import StreamContext
|
||||
from src.plugin_system.base.component_types import ActionInfo
|
||||
|
||||
logger = get_logger("kfc_unified")
|
||||
|
||||
|
||||
class UnifiedPromptGenerator:
    """Prompt generator for unified mode (single LLM call).

    Builds the System/User prompt pair for each scenario:
    - generate_system_prompt: modular system prompt
    - generate_responding_prompt: responding to incoming message(s)
    - generate_timeout_prompt: deciding what to do after a reply timeout
    - generate_proactive_prompt: proactively reaching out after silence
    """

    def __init__(self):
        # Stateless: every prompt is rebuilt from the call arguments.
        pass

    async def generate_system_prompt(
        self,
        session: "KokoroSession",
        available_actions: Optional[dict] = None,
        context_data: Optional[dict[str, str]] = None,
        chat_stream: Optional["ChatStream"] = None,
    ) -> str:
        """Build the modular system prompt.

        Thin wrapper over prompt_modules.build_system_prompt().
        """
        return prompt_modules.build_system_prompt(
            session=session,
            available_actions=available_actions,
            context_data=context_data,
            chat_stream=chat_stream,
        )

    async def generate_responding_prompt(
        self,
        session: "KokoroSession",
        message_content: str,
        sender_name: str,
        sender_id: str,
        message_time: Optional[float] = None,
        available_actions: Optional[dict] = None,
        context: Optional["StreamContext"] = None,
        context_data: Optional[dict[str, str]] = None,
        chat_stream: Optional["ChatStream"] = None,
        all_unread_messages: Optional[list] = None,
    ) -> tuple[str, str]:
        """Build prompts for the "respond to message(s)" scenario.

        Returns:
            tuple[str, str]: (system prompt, user prompt)
        """
        # System prompt
        system_prompt = await self.generate_system_prompt(
            session,
            available_actions,
            context_data=context_data,
            chat_stream=chat_stream,
        )

        # Narrative history: prefer the shared StreamContext history when
        # available, otherwise fall back to the session's mental log.
        if context:
            narrative_history = prompt_modules.format_history_from_context(
                context, session.mental_log
            )
        else:
            narrative_history = prompt_modules.format_narrative_history(session.mental_log)

        # Incoming message(s)
        incoming_messages = prompt_modules.format_incoming_messages(
            message_content=message_content,
            sender_name=sender_name,
            sender_id=sender_id,
            message_time=message_time,
            all_unread_messages=all_unread_messages,
        )

        # Fill the user-prompt template
        user_prompt = prompt_modules.RESPONDING_USER_PROMPT_TEMPLATE.format(
            narrative_history=narrative_history,
            incoming_messages=incoming_messages,
        )

        return system_prompt, user_prompt

    async def generate_timeout_prompt(
        self,
        session: "KokoroSession",
        available_actions: Optional[dict] = None,
        context_data: Optional[dict[str, str]] = None,
        chat_stream: Optional["ChatStream"] = None,
    ) -> tuple[str, str]:
        """Build prompts for the "waited too long with no reply" decision.

        Returns:
            tuple[str, str]: (system prompt, user prompt)
        """
        # System prompt
        system_prompt = await self.generate_system_prompt(
            session,
            available_actions,
            context_data=context_data,
            chat_stream=chat_stream,
        )

        # Narrative history from the mental log
        narrative_history = prompt_modules.format_narrative_history(session.mental_log)

        # How long we have been waiting
        wait_duration = session.waiting_config.get_elapsed_seconds()

        # Escalating follow-up warning, using thinking_count as the follow-up count
        followup_count = session.waiting_config.thinking_count
        max_followups = 3  # hard cap on consecutive follow-ups

        if followup_count >= max_followups:
            followup_warning = f"""⚠️ **重要提醒**:
你已经连续追问了 {followup_count} 次,对方都没有回复。
**强烈建议不要再发消息了**——继续追问会显得很缠人、很不尊重对方的空间。
对方可能真的在忙,或者暂时不想回复,这都是正常的。
请选择 `do_nothing` 继续等待,或者直接结束对话(设置 `max_wait_seconds: 0`)。"""
        elif followup_count > 0:
            followup_warning = f"""📝 提示:这已经是你第 {followup_count + 1} 次等待对方回复了。
如果对方持续没有回应,可能真的在忙或不方便,不需要急着追问。"""
        else:
            followup_warning = ""

        # Most recent bot reply, scanning the mental log backwards
        last_bot_message = "(没有记录)"
        for entry in reversed(session.mental_log):
            if entry.event_type == EventType.BOT_PLANNING:
                for action in entry.actions:
                    if action.get("type") in ("reply", "kfc_reply"):
                        content = action.get("content", "")
                        if content:
                            last_bot_message = content
                            break
                if last_bot_message != "(没有记录)":
                    break

        # Fill the user-prompt template
        user_prompt = prompt_modules.TIMEOUT_DECISION_USER_PROMPT_TEMPLATE.format(
            narrative_history=narrative_history,
            wait_duration_seconds=wait_duration,
            wait_duration_minutes=wait_duration / 60,
            expected_user_reaction=session.waiting_config.expected_reaction or "不确定",
            followup_warning=followup_warning,
            last_bot_message=last_bot_message,
        )

        return system_prompt, user_prompt

    async def generate_proactive_prompt(
        self,
        session: "KokoroSession",
        trigger_context: str,
        available_actions: Optional[dict] = None,
        context_data: Optional[dict[str, str]] = None,
        chat_stream: Optional["ChatStream"] = None,
    ) -> tuple[str, str]:
        """Build prompts for the proactive-thinking scenario.

        Returns:
            tuple[str, str]: (system prompt, user prompt)
        """
        # System prompt
        system_prompt = await self.generate_system_prompt(
            session,
            available_actions,
            context_data=context_data,
            chat_stream=chat_stream,
        )

        # Shorter narrative history for proactive thinking
        narrative_history = prompt_modules.format_narrative_history(
            session.mental_log, max_entries=10
        )

        # Human-readable silence duration
        silence_seconds = time.time() - session.last_activity_at
        if silence_seconds < 3600:
            silence_duration = f"{silence_seconds / 60:.0f}分钟"
        else:
            silence_duration = f"{silence_seconds / 3600:.1f}小时"

        # Current time
        current_time = datetime.now().strftime("%Y年%m月%d日 %H:%M")

        # Relationship block from context_data, with a generic fallback
        relation_block = ""
        if context_data:
            relation_info = context_data.get("relation_info", "")
            if relation_info:
                relation_block = f"### 你与对方的关系\n{relation_info}"

        if not relation_block:
            # Fallback: default relationship description
            relation_block = """### 你与对方的关系
- 你们还不太熟悉
- 正在慢慢了解中"""

        # Fill the user-prompt template
        user_prompt = prompt_modules.PROACTIVE_THINKING_USER_PROMPT_TEMPLATE.format(
            narrative_history=narrative_history,
            current_time=current_time,
            silence_duration=silence_duration,
            relation_block=relation_block,
            trigger_context=trigger_context,
        )

        return system_prompt, user_prompt

    def build_messages_for_llm(
        self,
        system_prompt: str,
        user_prompt: str,
        stream_id: str = "",
    ) -> str:
        """Merge system + user prompts into one LLM prompt string.

        Args:
            system_prompt: The assembled system prompt.
            user_prompt: The scenario-specific user prompt.
            stream_id: Stream id, used only for debug logging.

        Returns:
            The concatenated prompt.
        """
        # System and user parts separated by a horizontal rule
        full_prompt = f"{system_prompt}\n\n---\n\n{user_prompt}"

        # Full prompt is logged at DEBUG level only
        logger.debug(
            f"Final KFC prompt constructed for stream {stream_id}:\n"
            f"--- PROMPT START ---\n"
            f"{full_prompt}\n"
            f"--- PROMPT END ---"
        )

        return full_prompt
||||
|
||||
|
||||
# 全局提示词生成器实例
|
||||
_prompt_generator: Optional[UnifiedPromptGenerator] = None
|
||||
|
||||
|
||||
def get_unified_prompt_generator() -> UnifiedPromptGenerator:
|
||||
"""获取全局提示词生成器实例"""
|
||||
global _prompt_generator
|
||||
if _prompt_generator is None:
|
||||
_prompt_generator = UnifiedPromptGenerator()
|
||||
return _prompt_generator
|
||||
|
||||
|
||||
async def generate_unified_response(
    session: "KokoroSession",
    user_name: str,
    situation_type: str = "new_message",
    chat_stream: Optional["ChatStream"] = None,
    available_actions: Optional[dict] = None,
    extra_context: Optional[dict] = None,
) -> "LLMResponse":
    """Unified mode: produce a full response with a single LLM call.

    Flow:
    - build System + User prompts via UnifiedPromptGenerator
    - call the LLM with the "replyer" model configuration
    - parse the JSON response (thought + actions + max_wait_seconds)

    Args:
        session: Session object.
        user_name: Display name of the user.
        situation_type: Scenario selector ("new_message" / "timeout" / "proactive";
            anything else falls through to the responding scenario).
        chat_stream: Chat-stream object.
        available_actions: Available action mapping.
        extra_context: Extra context (e.g. "trigger_reason" for proactive mode).

    Returns:
        An LLMResponse with the parsed thought and actions, or an error response
        on any failure (never raises to the caller).
    """
    try:
        prompt_generator = get_unified_prompt_generator()
        extra_context = extra_context or {}

        # Context data: relationship, memories, expression habits, schedule.
        context_data = await _build_context_data(user_name, chat_stream, session.user_id)

        # Choose the prompt pair by scenario.
        if situation_type == "timeout":
            system_prompt, user_prompt = await prompt_generator.generate_timeout_prompt(
                session=session,
                available_actions=available_actions,
                context_data=context_data,
                chat_stream=chat_stream,
            )
        elif situation_type == "proactive":
            trigger_context = extra_context.get("trigger_reason", "")
            system_prompt, user_prompt = await prompt_generator.generate_proactive_prompt(
                session=session,
                trigger_context=trigger_context,
                available_actions=available_actions,
                context_data=context_data,
                chat_stream=chat_stream,
            )
        else:
            # Default: responding scenario (new_message / reply_in_time / reply_late).
            # Pull the latest user message (and any batch of unread messages).
            message_content, sender_name, sender_id, message_time, all_unread = _get_last_user_message(
                session, user_name, chat_stream
            )

            system_prompt, user_prompt = await prompt_generator.generate_responding_prompt(
                session=session,
                message_content=message_content,
                sender_name=sender_name,
                sender_id=sender_id,
                message_time=message_time,
                available_actions=available_actions,
                context=chat_stream.context if chat_stream else None,
                context_data=context_data,
                chat_stream=chat_stream,
                all_unread_messages=all_unread,
            )

        # Merge into the final prompt string.
        prompt = prompt_generator.build_messages_for_llm(
            system_prompt,
            user_prompt,
            stream_id=chat_stream.stream_id if chat_stream else "",
        )

        # Echo the full prompt only when enabled in the debug configuration.
        if global_config and global_config.debug.show_prompt:
            logger.info(
                f"[KFC] 完整提示词 (stream={chat_stream.stream_id if chat_stream else 'unknown'}):\n"
                f"--- PROMPT START ---\n"
                f"{prompt}\n"
                f"--- PROMPT END ---"
            )

        # Resolve the "replyer" model configuration.
        models = llm_api.get_available_models()
        replyer_config = models.get("replyer")

        if not replyer_config:
            logger.error("[KFC Unified] 未找到 replyer 模型配置")
            return LLMResponse.create_error_response("未找到 replyer 模型配置")

        # Single LLM call with the merged prompt.
        success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
            prompt=prompt,
            model_config=replyer_config,
            request_type="kokoro_flow_chatter.unified",
        )

        if not success:
            logger.error(f"[KFC Unified] LLM 调用失败: {raw_response}")
            return LLMResponse.create_error_response(raw_response)

        # Raw JSON at DEBUG level for troubleshooting.
        logger.debug(
            f"Raw JSON response from LLM for stream {chat_stream.stream_id if chat_stream else 'unknown'}:\n"
            f"--- JSON START ---\n"
            f"{raw_response}\n"
            f"--- JSON END ---"
        )

        # Parse into an LLMResponse.
        return _parse_unified_response(raw_response, chat_stream.stream_id if chat_stream else None)

    except Exception as e:
        # Never propagate: callers always receive an LLMResponse.
        logger.error(f"[KFC Unified] 生成失败: {e}")
        import traceback
        traceback.print_exc()
        return LLMResponse.create_error_response(str(e))
||||
|
||||
|
||||
async def _build_context_data(
    user_name: str,
    chat_stream: Optional["ChatStream"],
    user_id: Optional[str] = None,
) -> dict[str, str]:
    """Build context data (relationship, memories, expression habits, schedule).

    Best-effort: when no chat stream exists or the context builder fails, a
    minimal default dict is returned instead of raising.

    Args:
        user_name: Display name of the user (used in fallback text and retrieval).
        chat_stream: Chat-stream object, or None.
        user_id: Optional user id forwarded to the context builder.

    Returns:
        dict with keys "relation_info", "memory_block", "expression_habits",
        "schedule" (KFCContextBuilder may add more — TODO confirm).
    """
    if not chat_stream:
        return {
            "relation_info": f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。",
            "memory_block": "",
            "expression_habits": "",
            "schedule": "",
        }

    try:
        # Imported lazily to avoid a module-level import cycle — TODO confirm.
        from .context_builder import KFCContextBuilder

        builder = KFCContextBuilder(chat_stream)

        # Latest unread message as the retrieval target for memory lookup.
        target_message = ""
        if chat_stream.context:
            unread = chat_stream.context.get_unread_messages()
            if unread:
                target_message = unread[-1].processed_plain_text or unread[-1].display_message or ""

        context_data = await builder.build_all_context(
            sender_name=user_name,
            target_message=target_message,
            context=chat_stream.context,
            user_id=user_id,
        )

        return context_data

    except Exception as e:
        # Degrade gracefully: log and fall back to the same default dict.
        logger.warning(f"构建上下文数据失败: {e}")
        return {
            "relation_info": f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。",
            "memory_block": "",
            "expression_habits": "",
            "schedule": "",
        }
||||
|
||||
|
||||
def _get_last_user_message(
|
||||
session: KokoroSession,
|
||||
user_name: str,
|
||||
chat_stream: Optional["ChatStream"],
|
||||
) -> tuple[str, str, str, float, Optional[list]]:
|
||||
"""
|
||||
获取最后一条用户消息
|
||||
|
||||
Returns:
|
||||
tuple: (消息内容, 发送者名称, 发送者ID, 消息时间, 所有未读消息列表)
|
||||
"""
|
||||
message_content = ""
|
||||
sender_name = user_name
|
||||
sender_id = session.user_id or ""
|
||||
message_time = time.time()
|
||||
all_unread = None
|
||||
|
||||
# 从 chat_stream 获取未读消息
|
||||
if chat_stream and chat_stream.context:
|
||||
unread = chat_stream.context.get_unread_messages()
|
||||
if unread:
|
||||
all_unread = unread if len(unread) > 1 else None
|
||||
last_msg = unread[-1]
|
||||
message_content = last_msg.processed_plain_text or last_msg.display_message or ""
|
||||
if last_msg.user_info:
|
||||
sender_name = last_msg.user_info.user_nickname or user_name
|
||||
sender_id = str(last_msg.user_info.user_id)
|
||||
message_time = last_msg.time or time.time()
|
||||
|
||||
# 如果没有从 chat_stream 获取到,从 mental_log 获取
|
||||
if not message_content:
|
||||
for entry in reversed(session.mental_log):
|
||||
if entry.event_type == EventType.USER_MESSAGE:
|
||||
message_content = entry.content or ""
|
||||
sender_name = entry.user_name or user_name
|
||||
message_time = entry.timestamp
|
||||
break
|
||||
|
||||
return message_content, sender_name, sender_id, message_time, all_unread
|
||||
|
||||
|
||||
def _parse_unified_response(raw_response: str, stream_id: str | None = None) -> LLMResponse:
    """Parse a unified-mode LLM response into an :class:`LLMResponse`.

    Expected payload shape::

        {
            "thought": "...",
            "expected_user_reaction": "...",
            "max_wait_seconds": 300,
            "actions": [{"type": "reply", "content": "..."}]
        }
    """
    data = extract_and_parse_json(raw_response, strict=False)

    if not isinstance(data, dict) or not data:
        logger.warning(f"[KFC Unified] 无法解析 JSON: {raw_response[:200]}...")
        return LLMResponse.create_error_response("无法解析响应格式")

    # Legacy field name: expected_user_reaction -> expected_reaction
    # (only when the new name is absent, so it never overwrites).
    if "expected_reaction" not in data and "expected_user_reaction" in data:
        data["expected_reaction"] = data["expected_user_reaction"]

    # Legacy action type "reply" -> "kfc_reply". In unified mode the model
    # already segments its own reply, so the reply splitter is disabled.
    for action in data.get("actions", []):
        if not isinstance(action, dict):
            continue
        if action.get("type") == "reply":
            action["type"] = "kfc_reply"
        if action.get("type") == "kfc_reply":
            action["enable_splitter"] = False

    parsed = LLMResponse.from_dict(data)

    # Compact, human-readable log of thought + reply content.
    _log_pretty_response(parsed, stream_id)

    return parsed
|
||||
|
||||
|
||||
def _log_pretty_response(response: LLMResponse, stream_id: str | None = None) -> None:
    """Emit a compact, human-readable log of an LLM response."""
    if not (response.thought or response.actions):
        logger.warning("[KFC] 响应为空")
        return

    stream_tag = f"({stream_id[:8]}) " if stream_id else ""

    # Separate reply texts from other noteworthy action types
    # (no-op actions are not worth logging).
    replies: list = []
    other_actions: list = []
    for act in response.actions:
        if act.type == "kfc_reply":
            text = act.params.get("content", "")
            if text:
                replies.append(text)
        elif act.type not in ("do_nothing", "no_action"):
            other_actions.append(act.type)

    # One line per item, kept terse.
    if response.thought:
        logger.info(f"[KFC] {stream_tag}💭 {response.thought}")

    # Number the replies only when there is more than one of them.
    numbered = len(replies) > 1
    for idx, text in enumerate(replies, start=1):
        prefix = f"💬[{idx}]" if numbered else "💬"
        logger.info(f"[KFC] {prefix} {text}")

    if other_actions:
        logger.info(f"[KFC] 🎯 {', '.join(other_actions)}")

    if response.max_wait_seconds > 0 or response.expected_reaction:
        meta = f"⏱{response.max_wait_seconds}s" if response.max_wait_seconds > 0 else ""
        if response.expected_reaction:
            meta += f" 预期: {response.expected_reaction}"
        logger.info(f"[KFC] {meta.strip()}")
|
||||
@@ -1,5 +1,5 @@
|
||||
[inner]
|
||||
version = "7.9.4"
|
||||
version = "7.9.5"
|
||||
|
||||
#----以下是给开发人员阅读的,如果你只是部署了MoFox-Bot,不需要阅读----
|
||||
#如果你想要修改配置文件,请递增version的值
|
||||
@@ -648,6 +648,12 @@ log_decisions = false # 是否记录每次决策的详细日志
|
||||
# 开启后,KFC将接管所有私聊消息;关闭后,私聊消息将由AFC处理。
|
||||
enable = true
|
||||
|
||||
# --- 工作模式 ---
|
||||
# 可选值: "unified"(统一模式)或 "split"(分离模式)
|
||||
# unified: 单次LLM调用完成思考和回复生成,类似传统聊天方式,响应更快
|
||||
# split: Planner + Replyer两次LLM调用,先规划再生成回复,控制更精细
|
||||
mode = "split"
|
||||
|
||||
# --- 核心行为配置 ---
|
||||
max_wait_seconds_default = 300 # 默认的最大等待秒数(AI发送消息后愿意等待用户回复的时间)
|
||||
enable_continuous_thinking = true # 是否在等待期间启用心理活动更新
|
||||
@@ -676,7 +682,3 @@ quiet_hours_end = "07:00" # 勿扰结束时间
|
||||
|
||||
# 5. 触发概率:每次检查时主动发起的概率,用于避免过于频繁打扰。
|
||||
trigger_probability = 0.3 # 0.0~1.0,默认30%概率
|
||||
|
||||
# 6. 自然问候:在特定的时间,她会像朋友一样送上问候。
|
||||
enable_morning_greeting = true # 是否启用早安问候 (例如: 8:00 - 9:00)
|
||||
enable_night_greeting = true # 是否启用晚安问候 (例如: 22:00 - 23:00)
|
||||
|
||||
Reference in New Issue
Block a user