feat: refactor Kokoro Flow Chatter, add a planner and a reply generator, and improve prompt construction

Author: Windpicker-owo
Date: 2025-11-30 18:50:21 +08:00
parent 673da5cc8b
commit 8e26a5f58c
7 changed files with 573 additions and 98 deletions
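The heart of the refactor is splitting the old single `generate_response` call into two stages: a Planner that decides what to do, and a Replyer that writes the text for `kfc_reply` actions. The sketch below condenses the control flow that both `KokoroFlowChatter` and `ProactiveThinker` now follow (see the diffs further down); the function names and fields come from this commit, while the surrounding harness (`handle_turn`, the bare `action_manager` and `stream_id` parameters) is illustrative only.

```python
# Condensed, illustrative sketch of the new two-stage flow -- not the full implementation.
from .planner import generate_plan          # stage 1: decide WHAT to do
from .replyer import generate_reply_text    # stage 2: write the text for kfc_reply actions

async def handle_turn(session, user_name, situation_type, chat_stream,
                      available_actions, action_manager, stream_id):
    # 1. Planner: returns thought / actions / expected_reaction / max_wait_seconds.
    plan = await generate_plan(
        session=session,
        user_name=user_name,
        situation_type=situation_type,
        chat_stream=chat_stream,
        available_actions=available_actions,
    )

    # 2. Replyer: only kfc_reply actions get their content filled in afterwards.
    processed_actions = []
    for action in plan.actions:
        if action.type == "kfc_reply":
            ok, text = await generate_reply_text(
                session=session,
                user_name=user_name,
                thought=plan.thought,
                situation_type=situation_type,
                chat_stream=chat_stream,
            )
            if not (ok and text):
                continue  # drop the action if text generation failed
            action.params["content"] = text
        processed_actions.append(action)

    # 3. Execute the processed actions and log the plan to the session.
    for action in processed_actions:
        await action_manager.execute_action(
            action_name=action.type,
            chat_id=stream_id,
            reasoning=plan.thought,
            action_data=action.params,
        )
    session.add_bot_planning(
        thought=plan.thought,
        actions=[a.to_dict() for a in processed_actions],
        expected_reaction=plan.expected_reaction,
        max_wait_seconds=plan.max_wait_seconds,
    )
```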

View File

@@ -19,7 +19,8 @@ from .models import (
 )
 from .session import KokoroSession, SessionManager, get_session_manager
 from .chatter import KokoroFlowChatter
-from .replyer import generate_response
+from .planner import generate_plan
+from .replyer import generate_reply_text
 from .proactive_thinker import (
     ProactiveThinker,
     get_proactive_thinker,
@@ -60,7 +61,8 @@ __all__ = [
     "get_session_manager",
     # Core Components
     "KokoroFlowChatter",
-    "generate_response",
+    "generate_plan",
+    "generate_reply_text",
     # Proactive Thinker
     "ProactiveThinker",
     "get_proactive_thinker",

View File

@@ -19,7 +19,8 @@ from src.plugin_system.base.base_chatter import BaseChatter
 from src.plugin_system.base.component_types import ChatType
 from .models import SessionStatus
-from .replyer import generate_response
+from .planner import generate_plan
+from .replyer import generate_reply_text
 from .session import get_session_manager
 
 if TYPE_CHECKING:
@@ -143,8 +144,8 @@ class KokoroFlowChatter(BaseChatter):
         # 8. Get the chat stream
         chat_stream = await self._get_chat_stream()
 
-        # 9. Call the Replyer to generate a response
-        response = await generate_response(
+        # 9. Call the Planner to generate an action plan
+        plan_response = await generate_plan(
             session=session,
             user_name=user_name,
             situation_type=situation_type,
@@ -152,15 +153,35 @@ class KokoroFlowChatter(BaseChatter):
             available_actions=available_actions,
         )
 
-        # 10. Execute actions
+        # 10. For actions that need a reply, call the Replyer to generate the actual text
+        processed_actions = []
+        for action in plan_response.actions:
+            if action.type == "kfc_reply":
+                # Call the replyer to generate the reply text
+                success, reply_text = await generate_reply_text(
+                    session=session,
+                    user_name=user_name,
+                    thought=plan_response.thought,
+                    situation_type=situation_type,
+                    chat_stream=chat_stream,
+                )
+                if success and reply_text:
+                    # Fill in the action's content
+                    action.params["content"] = reply_text
+                else:
+                    logger.warning("[KFC] 回复生成失败,跳过该动作")
+                    continue
+            processed_actions.append(action)
+
+        # 11. Execute actions
         exec_results = []
         has_reply = False
-        for action in response.actions:
+        for action in processed_actions:
             result = await self.action_manager.execute_action(
                 action_name=action.type,
                 chat_id=self.stream_id,
                 target_message=target_message,
-                reasoning=response.thought,
+                reasoning=plan_response.thought,
                 action_data=action.params,
                 thinking_id=None,
                 log_prefix="[KFC]",
@@ -169,31 +190,31 @@ class KokoroFlowChatter(BaseChatter):
             if result.get("success") and action.type in ("kfc_reply", "respond"):
                 has_reply = True
 
-        # 11. Record the bot's planning in the mental_log
+        # 12. Record the bot's planning in the mental_log
         session.add_bot_planning(
-            thought=response.thought,
-            actions=[a.to_dict() for a in response.actions],
-            expected_reaction=response.expected_reaction,
-            max_wait_seconds=response.max_wait_seconds,
+            thought=plan_response.thought,
+            actions=[a.to_dict() for a in processed_actions],
+            expected_reaction=plan_response.expected_reaction,
+            max_wait_seconds=plan_response.max_wait_seconds,
         )
 
-        # 12. Update the session state
-        if response.max_wait_seconds > 0:
+        # 13. Update the session state
+        if plan_response.max_wait_seconds > 0:
             session.start_waiting(
-                expected_reaction=response.expected_reaction,
-                max_wait_seconds=response.max_wait_seconds,
+                expected_reaction=plan_response.expected_reaction,
+                max_wait_seconds=plan_response.max_wait_seconds,
             )
         else:
             session.end_waiting()
 
-        # 13. Mark messages as read
+        # 14. Mark messages as read
         for msg in unread_messages:
             context.mark_message_as_read(str(msg.message_id))
 
-        # 14. Save the session
+        # 15. Save the session
         await self.session_manager.save_session(user_id)
 
-        # 15. Update statistics
+        # 16. Update statistics
         self._stats["messages_processed"] += len(unread_messages)
         if has_reply:
             self._stats["successful_responses"] += 1
@@ -201,15 +222,15 @@ class KokoroFlowChatter(BaseChatter):
         logger.info(
             f"{SOFT_PURPLE}[KFC]{RESET} 处理完成: "
             f"user={user_name}, situation={situation_type}, "
-            f"actions={[a.type for a in response.actions]}, "
-            f"wait={response.max_wait_seconds}s"
+            f"actions={[a.type for a in processed_actions]}, "
+            f"wait={plan_response.max_wait_seconds}s"
         )
 
         return self._build_result(
             success=True,
             message="processed",
             has_reply=has_reply,
-            thought=response.thought,
+            thought=plan_response.thought,
             situation_type=situation_type,
         )

View File

@@ -0,0 +1,112 @@
"""
Kokoro Flow Chatter - Planner
规划器:负责分析情境并生成行动计划
- 输入:会话状态、用户消息、情境类型
- 输出LLMResponse包含 thought、actions、expected_reaction、max_wait_seconds
- 不负责生成具体回复文本,只决定"要做什么"
"""
from typing import TYPE_CHECKING, Optional
from src.common.logger import get_logger
from src.plugin_system.apis import llm_api
from src.utils.json_parser import extract_and_parse_json
from .models import LLMResponse
from .prompt.builder import get_prompt_builder
from .session import KokoroSession
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("kfc_planner")
async def generate_plan(
session: KokoroSession,
user_name: str,
situation_type: str = "new_message",
chat_stream: Optional["ChatStream"] = None,
available_actions: Optional[dict] = None,
extra_context: Optional[dict] = None,
) -> LLMResponse:
"""
生成行动计划
Args:
session: 会话对象
user_name: 用户名称
situation_type: 情况类型
chat_stream: 聊天流对象
available_actions: 可用动作字典
extra_context: 额外上下文
Returns:
LLMResponse 对象,包含计划信息
"""
try:
# 1. 构建规划器提示词
prompt_builder = get_prompt_builder()
prompt = await prompt_builder.build_planner_prompt(
session=session,
user_name=user_name,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
extra_context=extra_context,
)
from src.config.config import global_config
if global_config and global_config.debug.show_prompt:
logger.info(f"[KFC Planner] 生成的规划提示词:\n{prompt}")
# 2. 获取 planner 模型配置并调用 LLM
models = llm_api.get_available_models()
planner_config = models.get("planner")
if not planner_config:
logger.error("[KFC Planner] 未找到 planner 模型配置")
return LLMResponse.create_error_response("未找到 planner 模型配置")
success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
prompt=prompt,
model_config=planner_config,
request_type="kokoro_flow_chatter.plan",
)
if not success:
logger.error(f"[KFC Planner] LLM 调用失败: {raw_response}")
return LLMResponse.create_error_response(raw_response)
logger.debug(f"[KFC Planner] LLM 响应 (model={model_name}):\n{raw_response}")
# 3. 解析响应
return _parse_response(raw_response)
except Exception as e:
logger.error(f"[KFC Planner] 生成失败: {e}")
import traceback
traceback.print_exc()
return LLMResponse.create_error_response(str(e))
def _parse_response(raw_response: str) -> LLMResponse:
"""解析 LLM 响应"""
data = extract_and_parse_json(raw_response, strict=False)
if not data or not isinstance(data, dict):
logger.warning(f"[KFC Planner] 无法解析 JSON: {raw_response[:200]}...")
return LLMResponse.create_error_response("无法解析响应格式")
response = LLMResponse.from_dict(data)
if response.thought:
logger.info(
f"[KFC Planner] 解析成功: thought={response.thought[:50]}..., "
f"actions={[a.type for a in response.actions]}"
)
else:
logger.warning("[KFC Planner] 响应缺少 thought")
return response
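For orientation, here is a hypothetical raw planner output that the `_parse_response` helper above would accept. The field names follow the `kfc_planner_output_format` template added later in this commit; the concrete values are invented for illustration.

```python
# Illustrative only: the kind of raw planner output _parse_response() expects.
# extract_and_parse_json(strict=False) also tolerates a ```json fence around the payload.
raw_response = """
{
  "thought": "他等了我挺久,先回一句,语气放轻松点。",
  "actions": [{"type": "kfc_reply"}],
  "expected_reaction": "他会接着聊刚才的话题",
  "max_wait_seconds": 300
}
"""

plan = _parse_response(raw_response)
print(plan.thought, [a.type for a in plan.actions], plan.max_wait_seconds)
```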

View File

@@ -21,7 +21,8 @@ from src.config.config import global_config
 from src.plugin_system.apis.unified_scheduler import TriggerType, unified_scheduler
 
 from .models import EventType, SessionStatus
-from .replyer import generate_response
+from .planner import generate_plan
+from .replyer import generate_reply_text
 from .session import KokoroSession, get_session_manager
 
 if TYPE_CHECKING:
@@ -288,8 +289,8 @@ class ProactiveThinker:
             action_modifier = ActionModifier(action_manager, session.stream_id)
             await action_modifier.modify_actions(chatter_name="KokoroFlowChatter")
 
-            # Call the Replyer to make the timeout decision
-            response = await generate_response(
+            # Call the Planner to make the timeout decision
+            plan_response = await generate_plan(
                 session=session,
                 user_name=session.user_id,  # could be improved to fetch the real user name
                 situation_type="timeout",
@@ -297,32 +298,50 @@ class ProactiveThinker:
                 available_actions=action_manager.get_using_actions(),
             )
 
+            # For actions that need a reply, call the Replyer to generate the actual text
+            processed_actions = []
+            for action in plan_response.actions:
+                if action.type == "kfc_reply":
+                    success, reply_text = await generate_reply_text(
+                        session=session,
+                        user_name=session.user_id,
+                        thought=plan_response.thought,
+                        situation_type="timeout",
+                        chat_stream=chat_stream,
+                    )
+                    if success and reply_text:
+                        action.params["content"] = reply_text
+                    else:
+                        logger.warning("[ProactiveThinker] 回复生成失败,跳过该动作")
+                        continue
+                processed_actions.append(action)
+
             # Execute actions
-            for action in response.actions:
+            for action in processed_actions:
                 await action_manager.execute_action(
                     action_name=action.type,
                     chat_id=session.stream_id,
                     target_message=None,
-                    reasoning=response.thought,
+                    reasoning=plan_response.thought,
                     action_data=action.params,
                     thinking_id=None,
-                    log_prefix="[KFC V2 ProactiveThinker]",
+                    log_prefix="[KFC ProactiveThinker]",
                 )
 
             # Record in the mental_log
             session.add_bot_planning(
-                thought=response.thought,
-                actions=[a.to_dict() for a in response.actions],
-                expected_reaction=response.expected_reaction,
-                max_wait_seconds=response.max_wait_seconds,
+                thought=plan_response.thought,
+                actions=[a.to_dict() for a in processed_actions],
+                expected_reaction=plan_response.expected_reaction,
+                max_wait_seconds=plan_response.max_wait_seconds,
             )
 
             # Update state
-            if response.max_wait_seconds > 0:
+            if plan_response.max_wait_seconds > 0:
                 # Keep waiting
                 session.start_waiting(
-                    expected_reaction=response.expected_reaction,
-                    max_wait_seconds=response.max_wait_seconds,
+                    expected_reaction=plan_response.expected_reaction,
+                    max_wait_seconds=plan_response.max_wait_seconds,
                 )
             else:
                 # Stop waiting
@@ -333,8 +352,8 @@ class ProactiveThinker:
             logger.info(
                 f"[ProactiveThinker] 超时决策完成: user={session.user_id}, "
-                f"actions={[a.type for a in response.actions]}, "
-                f"continue_wait={response.max_wait_seconds > 0}"
+                f"actions={[a.type for a in processed_actions]}, "
+                f"continue_wait={plan_response.max_wait_seconds > 0}"
             )
 
         except Exception as e:
@@ -449,23 +468,25 @@ class ProactiveThinker:
             else:
                 silence_duration = f"{silence_seconds / 3600:.1f} 小时"
 
-            # Call the Replyer
-            response = await generate_response(
+            extra_context = {
+                "trigger_reason": trigger_reason,
+                "silence_duration": silence_duration,
+            }
+
+            # Call the Planner
+            plan_response = await generate_plan(
                 session=session,
                 user_name=session.user_id,
                 situation_type="proactive",
                 chat_stream=chat_stream,
                 available_actions=action_manager.get_using_actions(),
-                extra_context={
-                    "trigger_reason": trigger_reason,
-                    "silence_duration": silence_duration,
-                },
+                extra_context=extra_context,
            )
 
             # Check whether the planner decided not to disturb the user
             is_do_nothing = (
-                len(response.actions) == 0 or
-                (len(response.actions) == 1 and response.actions[0].type == "do_nothing")
+                len(plan_response.actions) == 0 or
+                (len(plan_response.actions) == 1 and plan_response.actions[0].type == "do_nothing")
             )
 
             if is_do_nothing:
@@ -474,32 +495,51 @@ class ProactiveThinker:
                 await self.session_manager.save_session(session.user_id)
                 return
 
+            # For actions that need a reply, call the Replyer to generate the actual text
+            processed_actions = []
+            for action in plan_response.actions:
+                if action.type == "kfc_reply":
+                    success, reply_text = await generate_reply_text(
+                        session=session,
+                        user_name=session.user_id,
+                        thought=plan_response.thought,
+                        situation_type="proactive",
+                        chat_stream=chat_stream,
+                        extra_context=extra_context,
+                    )
+                    if success and reply_text:
+                        action.params["content"] = reply_text
+                    else:
+                        logger.warning("[ProactiveThinker] 回复生成失败,跳过该动作")
+                        continue
+                processed_actions.append(action)
+
             # Execute actions
-            for action in response.actions:
+            for action in processed_actions:
                 await action_manager.execute_action(
                     action_name=action.type,
                     chat_id=session.stream_id,
                     target_message=None,
-                    reasoning=response.thought,
+                    reasoning=plan_response.thought,
                     action_data=action.params,
                     thinking_id=None,
-                    log_prefix="[KFC V2 ProactiveThinker]",
+                    log_prefix="[KFC ProactiveThinker]",
                 )
 
             # Record in the mental_log
             session.add_bot_planning(
-                thought=response.thought,
-                actions=[a.to_dict() for a in response.actions],
-                expected_reaction=response.expected_reaction,
-                max_wait_seconds=response.max_wait_seconds,
+                thought=plan_response.thought,
+                actions=[a.to_dict() for a in processed_actions],
+                expected_reaction=plan_response.expected_reaction,
+                max_wait_seconds=plan_response.max_wait_seconds,
            )
 
             # Update state
             session.last_proactive_at = time.time()
-            if response.max_wait_seconds > 0:
+            if plan_response.max_wait_seconds > 0:
                 session.start_waiting(
-                    expected_reaction=response.expected_reaction,
-                    max_wait_seconds=response.max_wait_seconds,
+                    expected_reaction=plan_response.expected_reaction,
+                    max_wait_seconds=plan_response.max_wait_seconds,
                 )
 
             # Save
@@ -507,7 +547,7 @@ class ProactiveThinker:
             logger.info(
                 f"[ProactiveThinker] 主动发起完成: user={session.user_id}, "
-                f"actions={[a.type for a in response.actions]}"
+                f"actions={[a.type for a in processed_actions]}"
             )
 
         except Exception as e:

View File

@@ -38,7 +38,7 @@ class PromptBuilder:
     def __init__(self):
         self._context_builder = None
 
-    async def build_prompt(
+    async def build_planner_prompt(
         self,
         session: KokoroSession,
         user_name: str,
@@ -48,7 +48,7 @@ class PromptBuilder:
         extra_context: Optional[dict] = None,
     ) -> str:
         """
-        Build the full prompt
+        Build the planner prompt (used to generate the action plan)
 
         Args:
             session: session object
@@ -59,7 +59,7 @@ class PromptBuilder:
             extra_context: extra context (e.g. trigger_reason)
 
         Returns:
-            the full prompt
+            the full planner prompt
         """
         extra_context = extra_context or {}
@@ -89,8 +89,8 @@ class PromptBuilder:
         # 6. Build the available-actions block
         actions_block = self._build_actions_block(available_actions)
 
-        # 7. Get the output format
-        output_format = await self._get_output_format()
+        # 7. Get the planner output format
+        output_format = await self._get_planner_output_format()
 
         # 8. Format with the unified prompt management system
         prompt = await global_prompt_manager.format_prompt(
@@ -109,6 +109,76 @@ class PromptBuilder:
 
         return prompt
 
+    async def build_replyer_prompt(
+        self,
+        session: KokoroSession,
+        user_name: str,
+        thought: str,
+        situation_type: str = "new_message",
+        chat_stream: Optional["ChatStream"] = None,
+        extra_context: Optional[dict] = None,
+    ) -> str:
+        """
+        Build the replyer prompt (used to generate natural reply text)
+
+        Args:
+            session: session object
+            user_name: user display name
+            thought: the thought produced by the planner
+            situation_type: situation type
+            chat_stream: chat stream object
+            extra_context: extra context
+
+        Returns:
+            the full replyer prompt
+        """
+        extra_context = extra_context or {}
+
+        # Get the user_id
+        user_id = session.user_id if session else None
+
+        # 1. Build the persona block
+        persona_block = self._build_persona_block()
+
+        # 2. Use the context_builder to fetch relationship, memory, expression habits, etc.
+        context_data = await self._build_context_data(user_name, chat_stream, user_id)
+        relation_block = context_data.get("relation_info", f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。")
+        memory_block = context_data.get("memory_block", "")
+        expression_habits = self._build_combined_expression_block(context_data.get("expression_habits", ""))
+
+        # 3. Build the activity stream
+        activity_stream = await self._build_activity_stream(session, user_name)
+
+        # 4. Build the current situation (a simplified version; less detail is needed here)
+        current_situation = await self._build_current_situation(
+            session, user_name, situation_type, extra_context
+        )
+
+        # 5. Build the chat history overview
+        chat_history_block = await self._build_chat_history_block(chat_stream)
+
+        # 6. Build the reply scenario context
+        reply_context = await self._build_reply_context(
+            session, user_name, situation_type, extra_context
+        )
+
+        # 7. Use the replyer-specific template
+        prompt = await global_prompt_manager.format_prompt(
+            PROMPT_NAMES["replyer"],
+            user_name=user_name,
+            persona_block=persona_block,
+            relation_block=relation_block,
+            memory_block=memory_block or "(暂无相关记忆)",
+            activity_stream=activity_stream or "(这是你们第一次聊天)",
+            current_situation=current_situation,
+            chat_history_block=chat_history_block,
+            expression_habits=expression_habits or "(根据自然对话风格回复即可)",
+            thought=thought,
+            reply_context=reply_context,
+        )
+
+        return prompt
+
     def _build_persona_block(self) -> str:
         """Build the persona block."""
         if global_config is None:
@@ -579,6 +649,91 @@ class PromptBuilder:
             "max_wait_seconds": 300
         }"""
 
+    async def _get_planner_output_format(self) -> str:
+        """Get the planner output-format template."""
+        try:
+            prompt = await global_prompt_manager.get_prompt_async(
+                PROMPT_NAMES["planner_output_format"]
+            )
+            return prompt.template
+        except KeyError:
+            # Fall back to a default format if the template is not registered
+            return """请用 JSON 格式回复:
+{
+    "thought": "你的想法",
+    "actions": [{"type": "kfc_reply"}],
+    "expected_reaction": "期待的反应",
+    "max_wait_seconds": 300
+}
+
+注意:kfc_reply 动作不需要填写 content 字段,回复内容会单独生成。"""
+
+    async def _build_reply_context(
+        self,
+        session: KokoroSession,
+        user_name: str,
+        situation_type: str,
+        extra_context: dict,
+    ) -> str:
+        """
+        Build the reply scenario context.
+
+        Builds a different scenario description for each situation_type to help
+        the replyer understand the situation it is replying to.
+        """
+        # Get the last user message
+        target_message = ""
+        entries = session.get_recent_entries(limit=10)
+        for entry in reversed(entries):
+            if entry.event_type == EventType.USER_MESSAGE:
+                target_message = entry.content or ""
+                break
+
+        if situation_type == "new_message":
+            return await global_prompt_manager.format_prompt(
+                PROMPT_NAMES["replyer_context_normal"],
+                user_name=user_name,
+                target_message=target_message or "(无消息内容)",
+            )
+        elif situation_type == "reply_in_time":
+            elapsed = session.waiting_config.get_elapsed_seconds()
+            max_wait = session.waiting_config.max_wait_seconds
+            return await global_prompt_manager.format_prompt(
+                PROMPT_NAMES["replyer_context_in_time"],
+                user_name=user_name,
+                target_message=target_message or "(无消息内容)",
+                elapsed_minutes=elapsed / 60,
+                max_wait_minutes=max_wait / 60,
+            )
+        elif situation_type == "reply_late":
+            elapsed = session.waiting_config.get_elapsed_seconds()
+            max_wait = session.waiting_config.max_wait_seconds
+            return await global_prompt_manager.format_prompt(
+                PROMPT_NAMES["replyer_context_late"],
+                user_name=user_name,
+                target_message=target_message or "(无消息内容)",
+                elapsed_minutes=elapsed / 60,
+                max_wait_minutes=max_wait / 60,
+            )
+        elif situation_type == "proactive":
+            silence = extra_context.get("silence_duration", "一段时间")
+            trigger_reason = extra_context.get("trigger_reason", "")
+            return await global_prompt_manager.format_prompt(
+                PROMPT_NAMES["replyer_context_proactive"],
+                user_name=user_name,
+                silence_duration=silence,
+                trigger_reason=trigger_reason,
+            )
+
+        # Fall back to the normal scenario
+        return await global_prompt_manager.format_prompt(
+            PROMPT_NAMES["replyer_context_normal"],
+            user_name=user_name,
+            target_message=target_message or "(无消息内容)",
+        )
+
 # Global singleton
 _prompt_builder: Optional[PromptBuilder] = None

View File

@@ -212,10 +212,141 @@ kfc_ENTRY_PROACTIVE_TRIGGER = Prompt(
 """,
 )
 
+# =================================================================================================
+# Planner-specific output format
+# =================================================================================================
+
+kfc_PLANNER_OUTPUT_FORMAT = Prompt(
+    name="kfc_planner_output_format",
+    template="""请用以下 JSON 格式回复:
+
+```json
+{{
+    "thought": "你脑子里在想什么,越自然越好",
+    "actions": [
+        {{"type": "动作名称", ...动作参数}}
+    ],
+    "expected_reaction": "你期待对方的反应是什么",
+    "max_wait_seconds": 300
+}}
+```
+
+### 字段说明
+
+- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
+- `actions`:你要执行的动作列表。每个动作是一个对象,必须包含 `type` 字段指定动作类型,其他字段根据动作类型不同而不同(参考上面每个动作的示例)。
+  - 对于 `kfc_reply` 动作,只需要指定 `{{"type": "kfc_reply"}}`,不需要填写 `content` 字段(回复内容会单独生成)
+- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
+- `max_wait_seconds`:设定等待时间,0 表示不等待,超时后你会考虑是否要主动说点什么
+
+### 注意事项
+
+- 动作参数直接写在动作对象里,不需要 `action_data` 包装
+- 即使什么都不想做,也放一个 `{{"type": "do_nothing"}}`
+- 可以组合多个动作,比如先发消息再发表情""",
+)
+
+
+# =================================================================================================
+# Replyer-specific prompt templates
+# =================================================================================================
+
+kfc_REPLYER_PROMPT = Prompt(
+    name="kfc_replyer",
+    template="""# 你与 {user_name} 的私聊
+
+## 1. 你是谁
+{persona_block}
+
+## 2. 你与 {user_name} 的关系
+{relation_block}
+
+## 3. 相关记忆
+{memory_block}
+
+## 4. 你们之间发生的事(活动流)
+以下是你和 {user_name} 最近的互动历史,按时间顺序记录了你们的对话和你的心理活动:
+
+{activity_stream}
+
+## 5. 当前情况
+{current_situation}
+
+## 6. 聊天历史总览
+以下是你和 {user_name} 的聊天记录,帮助你更好地理解对话上下文:
+
+{chat_history_block}
+
+## 7. 你的表达习惯
+{expression_habits}
+
+## 8. 你的决策
+你已经决定要回复 {user_name}。
+
+**你的想法**:{thought}
+
+{reply_context}
+
+## 9. 回复要求
+
+**情景化表达**
+- 根据你们的关系亲密度选择合适的语气和称呼
+- 参考活动流中的互动历史,保持对话的连贯性
+- 如果对方回复得晚/早,可以自然地表现出你的感受
+
+**自然对话**
+- 像真实的朋友聊天一样,不要生硬或公式化
+- 可以用口语、网络用语、语气词,让回复更生动
+- 长度适中,不要太长也不要太短
+
+**表达习惯**
+- 参考上面的"表达习惯"部分,使用你习惯的语言风格
+- 保持人设的一致性
+
+**禁忌**
+- 不要重复你之前说过的话
+- 不要输出 JSON 格式或技术性语言
+- 不要加引号、括号等多余符号
+- 不要用"我决定……""因此……"这种总结性语言
+
+现在,请直接输出你要说的话:""",
+)
+
+kfc_REPLYER_CONTEXT_NORMAL = Prompt(
+    name="kfc_replyer_context_normal",
+    template="""你要回复的是 {user_name} 刚发来的消息:
+{target_message}""",
+)
+
+kfc_REPLYER_CONTEXT_IN_TIME = Prompt(
+    name="kfc_replyer_context_in_time",
+    template="""你等了 {elapsed_minutes:.1f} 分钟(原本打算最多等 {max_wait_minutes:.1f} 分钟),{user_name} 终于回复了:
+{target_message}
+
+你可以表现出一点"等到了回复"的欣喜或轻松。""",
+)
+
+kfc_REPLYER_CONTEXT_LATE = Prompt(
+    name="kfc_replyer_context_late",
+    template="""你等了 {elapsed_minutes:.1f} 分钟(原本只打算等 {max_wait_minutes:.1f} 分钟),{user_name} 才回复:
+{target_message}
+
+虽然有点晚,但对方终于回复了。你可以选择轻轻抱怨一下,也可以装作没在意。""",
+)
+
+kfc_REPLYER_CONTEXT_PROACTIVE = Prompt(
+    name="kfc_replyer_context_proactive",
+    template="""你们已经有一段时间({silence_duration})没聊天了。{trigger_reason}
+
+你决定主动打破沉默,找 {user_name} 聊点什么。想一个自然的开场白,不要太突兀。""",
+)
+
+
 # Export all template names for easy external reference
 PROMPT_NAMES = {
     "main": "kfc_main",
     "output_format": "kfc_output_format",
+    "planner_output_format": "kfc_planner_output_format",
+    "replyer": "kfc_replyer",
+    "replyer_context_normal": "kfc_replyer_context_normal",
+    "replyer_context_in_time": "kfc_replyer_context_in_time",
+    "replyer_context_late": "kfc_replyer_context_late",
+    "replyer_context_proactive": "kfc_replyer_context_proactive",
     "situation_new_message": "kfc_situation_new_message",
     "situation_reply_in_time": "kfc_situation_reply_in_time",
     "situation_reply_late": "kfc_situation_reply_late",

View File

@@ -1,16 +1,17 @@
 """
 Kokoro Flow Chatter - Replyer
 
-A simplified reply-generation module built on the plugin system's llm_api
+A pure reply generator:
+- receives the planner's decision (thought, etc.)
+- focuses on turning reply intent into natural conversational text
+- does not output JSON; it produces message text that can be sent directly
 """
 
 from typing import TYPE_CHECKING, Optional
 
 from src.common.logger import get_logger
 from src.plugin_system.apis import llm_api
-from src.utils.json_parser import extract_and_parse_json
 
-from .models import LLMResponse
 from .prompt.builder import get_prompt_builder
 from .session import KokoroSession
@@ -20,90 +21,103 @@ if TYPE_CHECKING:
 logger = get_logger("kfc_replyer")
 
 
-async def generate_response(
+async def generate_reply_text(
     session: KokoroSession,
     user_name: str,
+    thought: str,
     situation_type: str = "new_message",
     chat_stream: Optional["ChatStream"] = None,
-    available_actions: Optional[dict] = None,
     extra_context: Optional[dict] = None,
-) -> LLMResponse:
+) -> tuple[bool, str]:
     """
-    Generate a reply
+    Generate the reply text
 
     Args:
         session: session object
         user_name: user display name
+        thought: the thought produced by the planner (inner monologue)
        situation_type: situation type
        chat_stream: chat stream object
-        available_actions: dict of available actions
        extra_context: extra context
 
     Returns:
-        an LLMResponse object
+        a (success, reply_text) tuple
+        - success: whether generation succeeded
+        - reply_text: the generated reply text
     """
     try:
-        # 1. Build the prompt
+        # 1. Build the replyer prompt
         prompt_builder = get_prompt_builder()
-        prompt = await prompt_builder.build_prompt(
+        prompt = await prompt_builder.build_replyer_prompt(
             session=session,
             user_name=user_name,
+            thought=thought,
             situation_type=situation_type,
             chat_stream=chat_stream,
-            available_actions=available_actions,
            extra_context=extra_context,
        )
 
         from src.config.config import global_config
         if global_config and global_config.debug.show_prompt:
-            logger.info(f"[KFC Replyer] 生成的提示词:\n{prompt}")
+            logger.info(f"[KFC Replyer] 生成的回复提示词:\n{prompt}")
 
-        # 2. Get the model config and call the LLM
+        # 2. Get the replyer model config and call the LLM
         models = llm_api.get_available_models()
         replyer_config = models.get("replyer")
         if not replyer_config:
             logger.error("[KFC Replyer] 未找到 replyer 模型配置")
-            return LLMResponse.create_error_response("未找到 replyer 模型配置")
+            return False, "(回复生成失败:未找到模型配置)"
 
         success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
             prompt=prompt,
             model_config=replyer_config,
-            request_type="kokoro_flow_chatter",
+            request_type="kokoro_flow_chatter.reply",
         )
 
         if not success:
             logger.error(f"[KFC Replyer] LLM 调用失败: {raw_response}")
-            return LLMResponse.create_error_response(raw_response)
+            return False, "(回复生成失败)"
 
-        logger.debug(f"[KFC Replyer] LLM 响应 (model={model_name}):\n{raw_response}")
-
-        # 3. Parse the response
-        return _parse_response(raw_response)
+        # 3. Clean up and return the reply text
+        reply_text = _clean_reply_text(raw_response)
+        logger.info(f"[KFC Replyer] 生成成功 (model={model_name}): {reply_text[:50]}...")
+
+        return True, reply_text
 
     except Exception as e:
         logger.error(f"[KFC Replyer] 生成失败: {e}")
         import traceback
         traceback.print_exc()
-        return LLMResponse.create_error_response(str(e))
-
-
-def _parse_response(raw_response: str) -> LLMResponse:
-    """Parse the LLM response."""
-    data = extract_and_parse_json(raw_response, strict=False)
-    if not data or not isinstance(data, dict):
-        logger.warning(f"[KFC Replyer] 无法解析 JSON: {raw_response[:200]}...")
-        return LLMResponse.create_error_response("无法解析响应格式")
-
-    response = LLMResponse.from_dict(data)
-
-    if response.thought:
-        logger.info(
-            f"[KFC Replyer] 解析成功: thought={response.thought[:50]}..., "
-            f"actions={[a.type for a in response.actions]}"
-        )
-    else:
-        logger.warning("[KFC Replyer] 响应缺少 thought")
-
-    return response
+        return False, "(回复生成失败)"
+
+
+def _clean_reply_text(raw_text: str) -> str:
+    """
+    Clean up the reply text.
+
+    Removes possible prefixes/suffixes, surrounding quotes, markdown markers, etc.
+    """
+    text = raw_text.strip()
+
+    # Strip a possible markdown code-block wrapper
+    if text.startswith("```") and text.endswith("```"):
+        lines = text.split("\n")
+        if len(lines) >= 3:
+            # Drop the leading and trailing ``` lines
+            text = "\n".join(lines[1:-1]).strip()
+
+    # Strip surrounding quotes (if the whole text is wrapped in them)
+    if (text.startswith('"') and text.endswith('"')) or \
+       (text.startswith("'") and text.endswith("'")):
+        text = text[1:-1].strip()
+
+    # Strip possible "你说:" / "回复:" style prefixes
+    prefixes_to_remove = ["你说:", "你说:", "回复:", "回复:", "我说:", "我说:"]
+    for prefix in prefixes_to_remove:
+        if text.startswith(prefix):
+            text = text[len(prefix):].strip()
+            break
+
+    return text
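A quick illustration of what `_clean_reply_text` does to typical raw LLM output; the inputs are hypothetical, and the behaviour follows the function defined in the diff above.

```python
# Hypothetical inputs demonstrating _clean_reply_text() from the module above.
print(_clean_reply_text('"好呀,那就周六见!"'))        # surrounding quotes stripped
print(_clean_reply_text("回复:我刚看到消息,抱歉~"))     # "回复:" prefix stripped
print(_clean_reply_text("```\n晚安,明天聊\n```"))       # markdown fence wrapper stripped
```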