Merge pull request #17 from MoFox-Studio/feature/kfc

Feature/kfc
This commit is contained in:
拾风
2025-12-01 16:07:05 +08:00
committed by GitHub
47 changed files with 7272 additions and 922 deletions

View File

@@ -10,16 +10,23 @@
python scripts/migrate_database.py --help python scripts/migrate_database.py --help
python scripts/migrate_database.py --source sqlite --target postgresql python scripts/migrate_database.py --source sqlite --target postgresql
python scripts/migrate_database.py --source mysql --target postgresql --batch-size 5000 python scripts/migrate_database.py --source mysql --target postgresql --batch-size 5000
# 交互式向导模式(推荐)
python scripts/migrate_database.py
注意事项: 注意事项:
1. 迁移前请备份源数据库 1. 迁移前请备份源数据库
2. 目标数据库应该是空的或不存在的(脚本会自动创建表) 2. 目标数据库应该是空的或不存在的(脚本会自动创建表)
3. 迁移过程可能需要较长时间,请耐心等待 3. 迁移过程可能需要较长时间,请耐心等待
4. 迁移到 PostgreSQL 时,脚本会自动:
- 修复布尔列类型(SQLite INTEGER -> PostgreSQL BOOLEAN)
- 重置序列值(避免主键冲突)
实现细节: 实现细节:
- 使用 SQLAlchemy 进行数据库连接和元数据管理 - 使用 SQLAlchemy 进行数据库连接和元数据管理
- 采用流式迁移,避免一次性加载过多数据 - 采用流式迁移,避免一次性加载过多数据
- 支持 SQLite、MySQL、PostgreSQL 之间的互相迁移 - 支持 SQLite、MySQL、PostgreSQL 之间的互相迁移
- 批量插入失败时自动降级为逐行插入,最大程度保留数据
""" """
from __future__ import annotations from __future__ import annotations
@@ -52,6 +59,8 @@ except ImportError:
from typing import Any, Iterable, Callable from typing import Any, Iterable, Callable
from datetime import datetime as dt
from sqlalchemy import ( from sqlalchemy import (
create_engine, create_engine,
MetaData, MetaData,
@@ -314,6 +323,143 @@ def get_table_row_count(conn: Connection, table: Table) -> int:
return 0 return 0
def convert_value_for_target(
val: Any,
col_name: str,
source_col_type: Any,
target_col_type: Any,
target_dialect: str,
target_col_nullable: bool = True,
) -> Any:
"""转换值以适配目标数据库类型
处理以下情况:
1. 空字符串日期时间 -> None
2. SQLite INTEGER (0/1) -> PostgreSQL BOOLEAN
3. 字符串日期时间 -> datetime 对象
4. 保留主键 id(迁移到 PostgreSQL 后会自动重置序列,避免主键冲突)
5. 对于 NOT NULL 列,提供合适的默认值
Args:
val: 原始值
col_name: 列名
source_col_type: 源列类型
target_col_type: 目标列类型
target_dialect: 目标数据库方言名称
target_col_nullable: 目标列是否允许 NULL
Returns:
转换后的值
"""
# 获取目标类型的类名
target_type_name = target_col_type.__class__.__name__.upper()
source_type_name = source_col_type.__class__.__name__.upper()
# 处理 None 值
if val is None:
# 如果目标列不允许 NULL,提供默认值
if not target_col_nullable:
# Boolean 类型的默认值是 False
if target_type_name == "BOOLEAN" or isinstance(target_col_type, sqltypes.Boolean):
return False
# 数值类型的默认值
if target_type_name in ("INTEGER", "BIGINT", "SMALLINT") or isinstance(target_col_type, sqltypes.Integer):
return 0
if target_type_name in ("FLOAT", "DOUBLE", "REAL", "NUMERIC", "DECIMAL", "DOUBLE_PRECISION") or isinstance(target_col_type, sqltypes.Float):
return 0.0
# 日期时间类型的默认值
if target_type_name in ("DATETIME", "TIMESTAMP") or isinstance(target_col_type, sqltypes.DateTime):
return dt.now()
# 字符串类型的默认值
if target_type_name in ("VARCHAR", "STRING", "TEXT") or isinstance(target_col_type, (sqltypes.String, sqltypes.Text)):
return ""
# 其他类型也返回空字符串作为兜底
return ""
return None
# 处理 Boolean 类型转换
# SQLite 中 Boolean 实际存储为 INTEGER (0/1)
if target_type_name == "BOOLEAN" or isinstance(target_col_type, sqltypes.Boolean):
if isinstance(val, bool):
return val
if isinstance(val, (int, float)):
return bool(val)
if isinstance(val, str):
val_lower = val.lower().strip()
if val_lower in ("true", "1", "yes"):
return True
elif val_lower in ("false", "0", "no", ""):
return False
return bool(val) if val else False
# 处理 DateTime 类型转换
if target_type_name in ("DATETIME", "TIMESTAMP") or isinstance(target_col_type, sqltypes.DateTime):
if isinstance(val, dt):
return val
if isinstance(val, str):
val = val.strip()
# 空字符串 -> None
if val == "":
return None
# 尝试多种日期格式
for fmt in [
"%Y-%m-%d %H:%M:%S.%f",
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%dT%H:%M:%S.%f",
"%Y-%m-%dT%H:%M:%S",
"%Y-%m-%d",
]:
try:
return dt.strptime(val, fmt)
except ValueError:
continue
# 如果都失败,尝试 fromisoformat
try:
return dt.fromisoformat(val)
except ValueError:
logger.warning("无法解析日期时间字符串 '%s' (列: %s),设为 None", val, col_name)
return None
# 如果是数值(时间戳),尝试转换
if isinstance(val, (int, float)) and val > 0:
try:
return dt.fromtimestamp(val)
except (OSError, ValueError, OverflowError):
return None
return None
# 处理 Float 类型
if target_type_name == "FLOAT" or isinstance(target_col_type, sqltypes.Float):
if isinstance(val, (int, float)):
return float(val)
if isinstance(val, str):
val = val.strip()
if val == "":
return None
try:
return float(val)
except ValueError:
return None
return val
# 处理 Integer 类型
if target_type_name == "INTEGER" or isinstance(target_col_type, sqltypes.Integer):
if isinstance(val, int):
return val
if isinstance(val, float):
return int(val)
if isinstance(val, str):
val = val.strip()
if val == "":
return None
try:
return int(float(val))
except ValueError:
return None
return val
return val
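A minimal usage sketch of the conversion rules documented above, assuming it is placed below convert_value_for_target in scripts/migrate_database.py; the import alias mirrors how the diff refers to sqlalchemy types, and the column names are illustrative:
from sqlalchemy import types as sqltypes  # same alias the script appears to use
# SQLite stores booleans as 0/1, so an INTEGER 1 becomes True for a PostgreSQL BOOLEAN column
print(convert_value_for_target(1, "is_emoji", sqltypes.Integer(), sqltypes.Boolean(), "postgresql"))    # True
# An empty datetime string becomes None instead of failing the INSERT
print(convert_value_for_target("", "created_at", sqltypes.String(), sqltypes.DateTime(), "postgresql"))  # None
# None in a NOT NULL integer column falls back to the default 0
print(convert_value_for_target(None, "count", sqltypes.Integer(), sqltypes.Integer(), "postgresql", target_col_nullable=False))  # 0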
def copy_table_structure(source_table: Table, target_metadata: MetaData, target_engine: Engine) -> Table: def copy_table_structure(source_table: Table, target_metadata: MetaData, target_engine: Engine) -> Table:
"""复制表结构到目标数据库,使其结构保持一致""" """复制表结构到目标数据库,使其结构保持一致"""
target_is_sqlite = target_engine.dialect.name == "sqlite" target_is_sqlite = target_engine.dialect.name == "sqlite"
@@ -351,19 +497,23 @@ def copy_table_structure(source_table: Table, target_metadata: MetaData, target_
def migrate_table_data( def migrate_table_data(
source_conn: Connection, source_conn: Connection,
target_conn: Connection, target_engine: Engine,
source_table: Table, source_table: Table,
target_table: Table, target_table: Table,
batch_size: int = 1000, batch_size: int = 1000,
target_dialect: str = "postgresql",
row_limit: int | None = None,
) -> tuple[int, int]: ) -> tuple[int, int]:
"""迁移单个表的数据 """迁移单个表的数据
Args: Args:
source_conn: 源数据库连接 source_conn: 源数据库连接
target_conn: 目标数据库连接 target_engine: 目标数据库引擎(注意:改为 engine 而不是 connection)
source_table: 源表对象 source_table: 源表对象
target_table: 目标表对象 target_table: 目标表对象
batch_size: 每批次处理大小 batch_size: 每批次处理大小
target_dialect: 目标数据库方言 (sqlite/mysql/postgresql)
row_limit: 最大迁移行数限制(None 表示不限制)
Returns: Returns:
tuple[int, int]: (迁移行数, 错误数量) tuple[int, int]: (迁移行数, 错误数量)
@@ -377,40 +527,101 @@ def migrate_table_data(
migrated_rows = 0 migrated_rows = 0
error_count = 0 error_count = 0
conversion_warnings = 0
# 构建源列到目标列的映射
target_cols_by_name = {c.key: c for c in target_table.columns}
# 识别主键列(通常是 id),迁移时保留原始 ID 以避免重复数据
primary_key_cols = {c.key for c in source_table.primary_key.columns}
# 使用流式查询,避免一次性加载太多数据 # 使用流式查询,避免一次性加载太多数据
# 对于 SQLAlchemy 1.4/2.0 可以使用 yield_per # 使用 text() 原始 SQL 查询,避免 SQLAlchemy 自动类型转换(如 DateTime)导致的错误
try: try:
select_stmt = source_table.select() # 构建原始 SQL 查询语句
result = source_conn.execute(select_stmt) col_names = [c.key for c in source_table.columns]
if row_limit:
# 按时间或 ID 倒序取最新的 row_limit 条
raw_sql = text(f"SELECT {', '.join(col_names)} FROM {source_table.name} ORDER BY id DESC LIMIT {row_limit}")
logger.info(" 限制迁移最新 %d", row_limit)
else:
raw_sql = text(f"SELECT {', '.join(col_names)} FROM {source_table.name}")
result = source_conn.execute(raw_sql)
except SQLAlchemyError as e: except SQLAlchemyError as e:
logger.error("查询表 %s 失败: %s", source_table.name, e) logger.error("查询表 %s 失败: %s", source_table.name, e)
return 0, 1 return 0, 1
def insert_batch(rows: list[dict]): def insert_batch(rows: list[dict]):
"""每个批次使用独立的事务,批次失败时降级为逐行插入"""
nonlocal migrated_rows, error_count nonlocal migrated_rows, error_count
if not rows: if not rows:
return return
try: try:
target_conn.execute(target_table.insert(), rows) # 每个批次使用独立的事务
with target_engine.begin() as target_conn:
target_conn.execute(target_table.insert(), rows)
migrated_rows += len(rows) migrated_rows += len(rows)
logger.info(" 已迁移 %d/%s", migrated_rows, total_rows or "?") logger.info(" 已迁移 %d/%s", migrated_rows, total_rows or "?")
except SQLAlchemyError as e: except SQLAlchemyError as e:
logger.error("写入表 %s 失败: %s", target_table.name, e) # 批量插入失败,降级为逐行插入
error_count += len(rows) logger.warning("批量插入失败,降级为逐行插入 (共 %d 行): %s", len(rows), str(e)[:200])
for row in rows:
try:
with target_engine.begin() as target_conn:
target_conn.execute(target_table.insert(), [row])
migrated_rows += 1
except SQLAlchemyError as row_e:
# 记录失败的行信息
row_id = row.get("id", "unknown")
logger.error("插入行失败 (id=%s): %s", row_id, str(row_e)[:200])
error_count += 1
logger.info(" 逐行插入完成,已迁移 %d/%s", migrated_rows, total_rows or "?")
batch: list[dict] = [] batch: list[dict] = []
null_char_replacements = 0 null_char_replacements = 0
# 构建列名列表(用于通过索引访问原始 SQL 结果)
col_list = list(source_table.columns)
col_name_to_idx = {c.key: idx for idx, c in enumerate(col_list)}
for row in result: for row in result:
# Use column objects to access row mapping to avoid quoted_name keys
row_dict = {} row_dict = {}
for col in source_table.columns: for col in col_list:
val = row._mapping[col] col_key = col.key
# 保留主键列 id,确保数据一致性
# 注意:如果目标表使用自增主键,可能需要重置序列
# 通过索引获取原始值(避免 SQLAlchemy 自动类型转换)
col_idx = col_name_to_idx[col_key]
val = row[col_idx]
# 处理 NUL 字符
if isinstance(val, str) and "\x00" in val: if isinstance(val, str) and "\x00" in val:
val = val.replace("\x00", "") val = val.replace("\x00", "")
null_char_replacements += 1 null_char_replacements += 1
row_dict[col.key] = val
# 获取目标列类型进行转换
target_col = target_cols_by_name.get(col_key)
if target_col is not None:
try:
val = convert_value_for_target(
val=val,
col_name=col_key,
source_col_type=col.type,
target_col_type=target_col.type,
target_dialect=target_dialect,
target_col_nullable=target_col.nullable if target_col.nullable is not None else True,
)
except Exception as e:
conversion_warnings += 1
if conversion_warnings <= 5:
logger.warning(
"值转换异常 (表=%s, 列=%s, 值=%r): %s",
source_table.name, col_key, val, e
)
row_dict[col_key] = val
batch.append(row_dict) batch.append(row_dict)
if len(batch) >= batch_size: if len(batch) >= batch_size:
@@ -432,6 +643,12 @@ def migrate_table_data(
source_table.name, source_table.name,
null_char_replacements, null_char_replacements,
) )
if conversion_warnings:
logger.warning(
"%s%d 个值发生类型转换警告",
source_table.name,
conversion_warnings,
)
return migrated_rows, error_count return migrated_rows, error_count
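A stripped-down, runnable sketch of the pattern insert_batch uses above: one independent transaction per batch, falling back to per-row transactions when the batch fails. It runs against an in-memory SQLite table, not the real schema:
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.exc import SQLAlchemyError

engine = create_engine("sqlite://")
meta = MetaData()
demo = Table("demo", meta,
             Column("id", Integer, primary_key=True),
             Column("name", String(20), nullable=False))
meta.create_all(engine)

rows = [{"id": 1, "name": "ok"}, {"id": 2, "name": None}]   # second row violates NOT NULL

try:
    with engine.begin() as conn:              # one transaction for the whole batch
        conn.execute(demo.insert(), rows)
except SQLAlchemyError as e:
    print("batch failed, falling back to row-by-row:", str(e)[:60])
    for row in rows:
        try:
            with engine.begin() as conn:      # independent transaction per row
                conn.execute(demo.insert(), [row])
        except SQLAlchemyError:
            print("skipped bad row:", row)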
@@ -479,6 +696,9 @@ class DatabaseMigrator:
batch_size: int = 1000, batch_size: int = 1000,
source_config: dict | None = None, source_config: dict | None = None,
target_config: dict | None = None, target_config: dict | None = None,
skip_tables: set | None = None,
only_tables: set | None = None,
no_create_tables: bool = False,
): ):
"""初始化迁移器 """初始化迁移器
@@ -488,12 +708,18 @@ class DatabaseMigrator:
batch_size: 批量处理大小 batch_size: 批量处理大小
source_config: 源数据库配置(可选,默认从配置文件读取) source_config: 源数据库配置(可选,默认从配置文件读取)
target_config: 目标数据库配置(可选,需要手动指定) target_config: 目标数据库配置(可选,需要手动指定)
skip_tables: 要跳过的表名集合
only_tables: 只迁移的表名集合(设置后忽略 skip_tables)
no_create_tables: 是否跳过创建表结构(假设目标表已存在)
""" """
self.source_type = source_type.lower() self.source_type = source_type.lower()
self.target_type = target_type.lower() self.target_type = target_type.lower()
self.batch_size = batch_size self.batch_size = batch_size
self.source_config = source_config self.source_config = source_config
self.target_config = target_config self.target_config = target_config
self.skip_tables = skip_tables or set()
self.only_tables = only_tables or set()
self.no_create_tables = no_create_tables
self._validate_database_types() self._validate_database_types()
@@ -659,25 +885,60 @@ class DatabaseMigrator:
tables = self._get_tables_in_dependency_order() tables = self._get_tables_in_dependency_order()
logger.info("按依赖顺序迁移表: %s", ", ".join(t.name for t in tables)) logger.info("按依赖顺序迁移表: %s", ", ".join(t.name for t in tables))
# 删除目标库中已有表(可选) # 如果指定了 only_tables则过滤表列表
self._drop_target_tables() if self.only_tables:
tables = [t for t in tables if t.name in self.only_tables]
logger.info("只迁移指定的表: %s", ", ".join(t.name for t in tables))
if not tables:
logger.warning("没有找到任何匹配 --only-tables 的表")
return
# 删除目标库中已有表(可选)- 如果是增量迁移则跳过
if not self.no_create_tables:
self._drop_target_tables()
# 获取目标数据库方言
target_dialect = self.target_engine.dialect.name
# 开始迁移 # 开始迁移
with self.source_engine.connect() as source_conn: with self.source_engine.connect() as source_conn:
for source_table in tables: for source_table in tables:
try: # 跳过指定的表(仅在未指定 only_tables 时生效)
# 在目标库中创建表结构 if not self.only_tables and source_table.name in self.skip_tables:
target_table = copy_table_structure(source_table, MetaData(), self.target_engine) logger.info("跳过表: %s (在 skip_tables 列表中)", source_table.name)
continue
# 每张表单独事务,避免退出上下文被自动回滚 try:
with self.target_engine.begin() as target_conn: # 在目标库中创建表结构(除非指定了 no_create_tables)
migrated_rows, error_count = migrate_table_data( if self.no_create_tables:
source_conn, # 反射目标数据库中已存在的表结构
target_conn, target_metadata = MetaData()
source_table, target_metadata.reflect(bind=self.target_engine, only=[source_table.name])
target_table, target_table = target_metadata.tables.get(source_table.name)
batch_size=self.batch_size, if target_table is None:
) logger.error("目标数据库中不存在表: %s,请先创建表结构或移除 --no-create-tables 参数", source_table.name)
self.stats["errors"].append(f"目标数据库中不存在表: {source_table.name}")
continue
logger.info("使用目标数据库中已存在的表结构: %s", source_table.name)
else:
target_table = copy_table_structure(source_table, MetaData(), self.target_engine)
# 对 messages 表限制迁移行数(只迁移最新 1 万条)
row_limit = None
if source_table.name == "messages":
row_limit = 10000
logger.info("messages 表将只迁移最新 %d 条记录", row_limit)
# 每个批次使用独立事务,传入 engine 而不是 connection
migrated_rows, error_count = migrate_table_data(
source_conn,
self.target_engine,
source_table,
target_table,
batch_size=self.batch_size,
target_dialect=target_dialect,
row_limit=row_limit,
)
self.stats["tables_migrated"] += 1 self.stats["tables_migrated"] += 1
self.stats["rows_migrated"] += migrated_rows self.stats["rows_migrated"] += migrated_rows
@@ -691,6 +952,11 @@ class DatabaseMigrator:
self.stats["errors"].append(f"{source_table.name} 迁移失败: {e}") self.stats["errors"].append(f"{source_table.name} 迁移失败: {e}")
self.stats["end_time"] = time.time() self.stats["end_time"] = time.time()
# 迁移完成后,自动修复 PostgreSQL 特有问题
if self.target_type == "postgresql" and self.target_engine:
fix_postgresql_boolean_columns(self.target_engine)
fix_postgresql_sequences(self.target_engine)
def print_summary(self): def print_summary(self):
"""打印迁移总结""" """打印迁移总结"""
@@ -804,6 +1070,29 @@ def parse_args():
target_group.add_argument("--target-schema", type=str, default="public", help="PostgreSQL schema") target_group.add_argument("--target-schema", type=str, default="public", help="PostgreSQL schema")
target_group.add_argument("--target-charset", type=str, default="utf8mb4", help="MySQL 字符集") target_group.add_argument("--target-charset", type=str, default="utf8mb4", help="MySQL 字符集")
# 跳过表参数
parser.add_argument(
"--skip-tables",
type=str,
default="",
help="跳过迁移的表名,多个表名用逗号分隔(如: messages,logs",
)
# 只迁移指定表参数
parser.add_argument(
"--only-tables",
type=str,
default="",
help="只迁移指定的表名,多个表名用逗号分隔(如: user_relationships,maizone_schedule_status。设置后将忽略 --skip-tables",
)
# 不创建表结构,假设目标表已存在
parser.add_argument(
"--no-create-tables",
action="store_true",
help="不创建表结构,假设目标数据库中的表已存在。用于增量迁移指定表的数据",
)
return parser.parse_args() return parser.parse_args()
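Illustrative command lines combining the new options (table names taken from the help strings above; adjust to the actual schema):
# Migrate only two tables into an existing schema, without recreating tables (incremental refresh)
python scripts/migrate_database.py --source sqlite --target postgresql --only-tables user_relationships,maizone_schedule_status --no-create-tables
# Full migration, but skip the large tables mentioned in the help text
python scripts/migrate_database.py --source sqlite --target postgresql --skip-tables messages,logs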
@@ -1012,6 +1301,112 @@ def interactive_setup() -> dict:
} }
def fix_postgresql_sequences(engine: Engine):
"""修复 PostgreSQL 序列值
迁移数据后PostgreSQL 的序列(用于自增主键)可能没有更新到正确的值,
导致插入新记录时出现主键冲突。此函数会自动检测并重置所有序列。
Args:
engine: PostgreSQL 数据库引擎
"""
if engine.dialect.name != "postgresql":
logger.info("非 PostgreSQL 数据库,跳过序列修复")
return
logger.info("正在修复 PostgreSQL 序列...")
with engine.connect() as conn:
# 获取所有带有序列的表
result = conn.execute(text('''
SELECT
t.table_name,
c.column_name,
pg_get_serial_sequence(t.table_name, c.column_name) as sequence_name
FROM information_schema.tables t
JOIN information_schema.columns c
ON t.table_name = c.table_name AND t.table_schema = c.table_schema
WHERE t.table_schema = 'public'
AND t.table_type = 'BASE TABLE'
AND c.column_default LIKE 'nextval%'
ORDER BY t.table_name
'''))
sequences = result.fetchall()
logger.info("发现 %d 个带序列的表", len(sequences))
fixed_count = 0
for table_name, column_name, seq_name in sequences:
if seq_name:
try:
# 获取当前表中该列的最大值
max_result = conn.execute(text(f'SELECT COALESCE(MAX({column_name}), 0) FROM {table_name}'))
max_val = max_result.scalar()
# 设置序列的下一个值
next_val = max_val + 1
conn.execute(text(f"SELECT setval('{seq_name}', {next_val}, false)"))
conn.commit()
logger.info("%s.%s: 最大值=%d, 序列设为=%d", table_name, column_name, max_val, next_val)
fixed_count += 1
except Exception as e:
logger.warning("%s.%s: 修复失败 - %s", table_name, column_name, e)
logger.info("序列修复完成!共修复 %d 个序列", fixed_count)
def fix_postgresql_boolean_columns(engine: Engine):
"""修复 PostgreSQL 布尔列类型
从 SQLite 迁移后,布尔列可能是 INTEGER 类型。此函数将其转换为 BOOLEAN。
Args:
engine: PostgreSQL 数据库引擎
"""
if engine.dialect.name != "postgresql":
logger.info("非 PostgreSQL 数据库,跳过布尔列修复")
return
# 已知需要转换为 BOOLEAN 的列
BOOLEAN_COLUMNS = {
'messages': ['is_mentioned', 'is_emoji', 'is_picid', 'is_command',
'is_notify', 'is_public_notice', 'should_reply', 'should_act'],
'action_records': ['action_done', 'action_build_into_prompt'],
}
logger.info("正在检查并修复 PostgreSQL 布尔列...")
with engine.connect() as conn:
fixed_count = 0
for table_name, columns in BOOLEAN_COLUMNS.items():
for col_name in columns:
try:
# 检查当前类型
result = conn.execute(text(f'''
SELECT data_type FROM information_schema.columns
WHERE table_name = '{table_name}' AND column_name = '{col_name}'
'''))
row = result.fetchone()
if row and row[0] != 'boolean':
# 需要修复
conn.execute(text(f'''
ALTER TABLE {table_name}
ALTER COLUMN {col_name} TYPE BOOLEAN
USING CASE WHEN {col_name} = 0 THEN FALSE ELSE TRUE END
'''))
conn.commit()
logger.info("%s.%s: %s -> BOOLEAN", table_name, col_name, row[0])
fixed_count += 1
except Exception as e:
logger.warning(" ⚠️ %s.%s: 检查/修复失败 - %s", table_name, col_name, e)
if fixed_count > 0:
logger.info("布尔列修复完成!共修复 %d", fixed_count)
else:
logger.info("所有布尔列类型正确,无需修复")
def main(): def main():
"""主函数""" """主函数"""
args = parse_args() args = parse_args()
@@ -1055,12 +1450,27 @@ def main():
sys.exit(1) sys.exit(1)
try: try:
# 解析跳过的表
skip_tables = set()
if args.skip_tables:
skip_tables = {t.strip() for t in args.skip_tables.split(",") if t.strip()}
logger.info("将跳过以下表: %s", ", ".join(skip_tables))
# 解析只迁移的表
only_tables = set()
if args.only_tables:
only_tables = {t.strip() for t in args.only_tables.split(",") if t.strip()}
logger.info("将只迁移以下表: %s", ", ".join(only_tables))
migrator = DatabaseMigrator( migrator = DatabaseMigrator(
source_type=args.source, source_type=args.source,
target_type=args.target, target_type=args.target,
batch_size=args.batch_size, batch_size=args.batch_size,
source_config=source_config, source_config=source_config,
target_config=target_config, target_config=target_config,
skip_tables=skip_tables,
only_tables=only_tables,
no_create_tables=args.no_create_tables,
) )
stats = migrator.run() stats = migrator.run()
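A sketch of driving the migrator programmatically with the new options; illustrative values, and target_config is assumed to be a dict assembled from the --target-* options the same way main() builds it, so this is not standalone-runnable:
migrator = DatabaseMigrator(
    source_type="sqlite",
    target_type="postgresql",
    batch_size=2000,
    source_config=None,              # None -> read from the config file (per the docstring above)
    target_config=target_config,     # dict built from the --target-* options
    skip_tables={"messages"},        # or only_tables={"user_relationships"}
    no_create_tables=False,
)
stats = migrator.run()
migrator.print_summary()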

View File

@@ -1,77 +0,0 @@
#!/usr/bin/env python3
"""重置 PostgreSQL 序列值
迁移数据后PostgreSQL 的序列(用于自增主键)可能没有更新到正确的值,
导致插入新记录时出现主键冲突。此脚本会自动检测并重置所有序列。
使用方法:
python scripts/reset_pg_sequences.py --host localhost --port 5432 --database maibot --user postgres --password your_password
"""
import argparse
import psycopg
def reset_sequences(host: str, port: int, database: str, user: str, password: str):
"""重置所有序列值"""
conn_str = f"host={host} port={port} dbname={database} user={user} password={password}"
print(f"连接到 PostgreSQL: {host}:{port}/{database}")
conn = psycopg.connect(conn_str)
conn.autocommit = True
# 查询所有序列及其关联的表和列
query = """
SELECT
t.relname AS table_name,
a.attname AS column_name,
s.relname AS sequence_name
FROM pg_class s
JOIN pg_depend d ON d.objid = s.oid
JOIN pg_class t ON d.refobjid = t.oid
JOIN pg_attribute a ON (d.refobjid, d.refobjsubid) = (a.attrelid, a.attnum)
WHERE s.relkind = 'S'
"""
cursor = conn.execute(query)
sequences = cursor.fetchall()
print(f"发现 {len(sequences)} 个序列")
reset_count = 0
for table_name, col_name, seq_name in sequences:
try:
# 获取当前最大 ID
max_result = conn.execute(f'SELECT MAX("{col_name}") FROM "{table_name}"')
max_id = max_result.fetchone()[0]
if max_id is not None:
# 重置序列
conn.execute(f"SELECT setval('{seq_name}', {max_id}, true)")
print(f"{seq_name} -> {max_id}")
reset_count += 1
else:
print(f" - {seq_name}: 表为空,跳过")
except Exception as e:
print(f"{table_name}.{col_name}: {e}")
conn.close()
print(f"\n✅ 重置完成!共重置 {reset_count} 个序列")
def main():
parser = argparse.ArgumentParser(description="重置 PostgreSQL 序列值")
parser.add_argument("--host", default="localhost", help="PostgreSQL 主机")
parser.add_argument("--port", type=int, default=5432, help="PostgreSQL 端口")
parser.add_argument("--database", default="maibot", help="数据库名")
parser.add_argument("--user", default="postgres", help="用户名")
parser.add_argument("--password", required=True, help="密码")
args = parser.parse_args()
reset_sequences(args.host, args.port, args.database, args.user, args.password)
if __name__ == "__main__":
main()

View File

@@ -57,12 +57,40 @@ class ChatterManager:
self.stats["chatters_registered"] += 1 self.stats["chatters_registered"] += 1
def get_chatter_class(self, chat_type: ChatType) -> type | None: def get_chatter_class_for_chat_type(self, chat_type: ChatType) -> type | None:
"""获取指定聊天类型的聊天处理器类""" """
if chat_type in self.chatter_classes: 获取指定聊天类型的最佳聊天处理器类
return self.chatter_classes[chat_type][0]
优先级规则:
1. 优先选择明确匹配当前聊天类型的 Chatter如 PRIVATE 或 GROUP
2. 如果没有精确匹配,才使用 ALL 类型的 Chatter
Args:
chat_type: 聊天类型
Returns:
最佳匹配的聊天处理器类,如果没有匹配则返回 None
"""
# 1. 首先尝试精确匹配(排除 ALL 类型)
if chat_type != ChatType.ALL and chat_type in self.chatter_classes:
chatter_list = self.chatter_classes[chat_type]
if chatter_list:
logger.debug(f"找到精确匹配的聊天处理器: {chatter_list[0].__name__} for {chat_type.value}")
return chatter_list[0]
# 2. 如果没有精确匹配,回退到 ALL 类型
if ChatType.ALL in self.chatter_classes:
chatter_list = self.chatter_classes[ChatType.ALL]
if chatter_list:
logger.debug(f"使用通用聊天处理器: {chatter_list[0].__name__} for {chat_type.value}")
return chatter_list[0]
return None return None
def get_chatter_class(self, chat_type: ChatType) -> type | None:
"""获取指定聊天类型的聊天处理器类(兼容旧接口)"""
return self.get_chatter_class_for_chat_type(chat_type)
def get_supported_chat_types(self) -> list[ChatType]: def get_supported_chat_types(self) -> list[ChatType]:
"""获取支持的聊天类型列表""" """获取支持的聊天类型列表"""
return list(self.chatter_classes.keys()) return list(self.chatter_classes.keys())
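A toy, runnable illustration of the priority described in the docstring above: an exact ChatType match wins, and the ALL registration is only a fallback (the enum and class names are illustrative stand-ins, not the project's real classes):
from enum import Enum

class ChatType(Enum):          # stand-in for the project's ChatType
    PRIVATE = "private"
    GROUP = "group"
    ALL = "all"

class PrivateChatter: ...
class GenericChatter: ...

chatter_classes = {ChatType.PRIVATE: [PrivateChatter], ChatType.ALL: [GenericChatter]}

def pick(chat_type: ChatType) -> type | None:
    if chat_type != ChatType.ALL and chatter_classes.get(chat_type):
        return chatter_classes[chat_type][0]       # 1. exact match first
    if chatter_classes.get(ChatType.ALL):
        return chatter_classes[ChatType.ALL][0]    # 2. fall back to ALL
    return None

print(pick(ChatType.PRIVATE).__name__)   # PrivateChatter, not GenericChatter
print(pick(ChatType.GROUP).__name__)     # GenericChatter (no exact GROUP registration)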
@@ -112,29 +140,29 @@ class ChatterManager:
logger.error("schedule unread cleanup failed", stream_id=stream_id, error=runtime_error) logger.error("schedule unread cleanup failed", stream_id=stream_id, error=runtime_error)
async def process_stream_context(self, stream_id: str, context: "StreamContext") -> dict: async def process_stream_context(self, stream_id: str, context: "StreamContext") -> dict:
"""处理流上下文""" """
处理流上下文
每个聊天流只能有一个活跃的 Chatter 组件。
选择优先级:明确指定聊天类型的 Chatter > ALL 类型的 Chatter
"""
chat_type = context.chat_type chat_type = context.chat_type
chat_type_value = chat_type.value chat_type_value = chat_type.value
logger.debug("处理流上下文", stream_id=stream_id, chat_type=chat_type_value) logger.debug("处理流上下文", stream_id=stream_id, chat_type=chat_type_value)
self._ensure_chatter_registry() self._ensure_chatter_registry()
chatter_class = self.get_chatter_class(chat_type) # 检查是否已有该流的 Chatter 实例
if not chatter_class: stream_instance = self.instances.get(stream_id)
all_chatter_class = self.get_chatter_class(ChatType.ALL)
if all_chatter_class: if stream_instance is None:
chatter_class = all_chatter_class # 使用新的优先级选择逻辑获取最佳 Chatter 类
logger.info( chatter_class = self.get_chatter_class_for_chat_type(chat_type)
"回退到通用聊天处理器",
stream_id=stream_id, if not chatter_class:
requested_type=chat_type_value,
fallback=ChatType.ALL.value,
)
else:
raise ValueError(f"No chatter registered for chat type {chat_type}") raise ValueError(f"No chatter registered for chat type {chat_type}")
stream_instance = self.instances.get(stream_id) # 创建新实例
if stream_instance is None:
stream_instance = chatter_class(stream_id=stream_id, action_manager=self.action_manager) stream_instance = chatter_class(stream_id=stream_id, action_manager=self.action_manager)
self.instances[stream_id] = stream_instance self.instances[stream_id] = stream_instance
logger.info( logger.info(
@@ -143,6 +171,13 @@ class ChatterManager:
chatter_class=chatter_class.__name__, chatter_class=chatter_class.__name__,
chat_type=chat_type_value, chat_type=chat_type_value,
) )
else:
# 已有实例,直接使用(每个流只有一个活跃的 Chatter)
logger.debug(
"使用已有聊天处理器实例",
stream_id=stream_id,
chatter_class=stream_instance.__class__.__name__,
)
self.stats["streams_processed"] += 1 self.stats["streams_processed"] += 1
try: try:

View File

@@ -55,6 +55,11 @@ class StreamLoopManager:
# 流循环启动锁:防止并发启动同一个流的多个循环任务 # 流循环启动锁:防止并发启动同一个流的多个循环任务
self._stream_start_locks: dict[str, asyncio.Lock] = {} self._stream_start_locks: dict[str, asyncio.Lock] = {}
# 死锁检测:记录每个流的最后活动时间
self._stream_last_activity: dict[str, float] = {}
self._deadlock_detector_task: asyncio.Task | None = None
self._deadlock_threshold_seconds: float = 120.0 # 2分钟无活动视为可能死锁
logger.info(f"流循环管理器初始化完成 (最大并发流数: {self.max_concurrent_streams})") logger.info(f"流循环管理器初始化完成 (最大并发流数: {self.max_concurrent_streams})")
@@ -65,6 +70,60 @@ class StreamLoopManager:
return return
self.is_running = True self.is_running = True
# 启动死锁检测器
self._deadlock_detector_task = asyncio.create_task(
self._deadlock_detector_loop(),
name="deadlock_detector"
)
logger.info("死锁检测器已启动")
async def _deadlock_detector_loop(self) -> None:
"""死锁检测循环 - 定期检查所有流的活动状态"""
while self.is_running:
try:
await asyncio.sleep(30.0) # 每30秒检查一次
current_time = time.time()
suspected_deadlocks = []
# 检查所有活跃流的最后活动时间
for stream_id, last_activity in list(self._stream_last_activity.items()):
inactive_seconds = current_time - last_activity
if inactive_seconds > self._deadlock_threshold_seconds:
suspected_deadlocks.append((stream_id, inactive_seconds))
if suspected_deadlocks:
logger.warning(
f"🔴 [死锁检测] 发现 {len(suspected_deadlocks)} 个可能卡住的流:\n" +
"\n".join([
f" - stream={sid[:8]}, 无活动时间={inactive:.1f}s"
for sid, inactive in suspected_deadlocks
])
)
# 打印当前所有 asyncio 任务的状态
all_tasks = asyncio.all_tasks()
stream_loop_tasks = [t for t in all_tasks if t.get_name().startswith("stream_loop_")]
logger.warning(
f"🔴 [死锁检测] 当前流循环任务状态:\n" +
"\n".join([
f" - {t.get_name()}: done={t.done()}, cancelled={t.cancelled()}"
for t in stream_loop_tasks
])
)
else:
# 每5分钟报告一次正常状态
if int(current_time) % 300 < 30:
active_count = len(self._stream_last_activity)
if active_count > 0:
logger.info(f"🟢 [死锁检测] 所有 {active_count} 个流正常运行中")
except asyncio.CancelledError:
logger.info("死锁检测器被取消")
break
except Exception as e:
logger.error(f"死锁检测器出错: {e}")
async def stop(self) -> None: async def stop(self) -> None:
"""停止流循环管理器""" """停止流循环管理器"""
@@ -72,6 +131,15 @@ class StreamLoopManager:
return return
self.is_running = False self.is_running = False
# 停止死锁检测器
if self._deadlock_detector_task and not self._deadlock_detector_task.done():
self._deadlock_detector_task.cancel()
try:
await self._deadlock_detector_task
except asyncio.CancelledError:
pass
logger.info("死锁检测器已停止")
# 取消所有流循环 # 取消所有流循环
try: try:
@@ -217,11 +285,24 @@ class StreamLoopManager:
""" """
task_id = id(asyncio.current_task()) task_id = id(asyncio.current_task())
logger.info(f"🔄 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 启动") logger.info(f"🔄 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 启动")
# 死锁检测:记录循环次数和上次活动时间
loop_count = 0
# 注册到活动跟踪
self._stream_last_activity[stream_id] = time.time()
try: try:
while self.is_running: while self.is_running:
loop_count += 1
loop_start_time = time.time()
# 更新活动时间(死锁检测用)
self._stream_last_activity[stream_id] = loop_start_time
try: try:
# 1. 获取流上下文 # 1. 获取流上下文
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count}, 获取上下文...")
context = await self._get_stream_context(stream_id) context = await self._get_stream_context(stream_id)
if not context: if not context:
logger.warning(f"⚠️ [流工作器] stream={stream_id[:8]}, 无法获取流上下文") logger.warning(f"⚠️ [流工作器] stream={stream_id[:8]}, 无法获取流上下文")
@@ -229,6 +310,7 @@ class StreamLoopManager:
continue continue
# 2. 检查是否有消息需要处理 # 2. 检查是否有消息需要处理
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count}, 刷新缓存消息...")
await self._flush_cached_messages_to_unread(stream_id) await self._flush_cached_messages_to_unread(stream_id)
unread_count = self._get_unread_count(context) unread_count = self._get_unread_count(context)
force_dispatch = self._needs_force_dispatch_for_context(context, unread_count) force_dispatch = self._needs_force_dispatch_for_context(context, unread_count)
@@ -248,13 +330,36 @@ class StreamLoopManager:
logger.debug(f"更新流能量失败 {stream_id}: {e}") logger.debug(f"更新流能量失败 {stream_id}: {e}")
# 4. 激活chatter处理 # 4. 激活chatter处理
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count}, 开始chatter处理...")
try: try:
if global_config is None: # 在长时间处理期间定期更新活动时间,避免死锁检测误报
raise RuntimeError("Global config is not initialized") async def process_with_activity_update():
success = await asyncio.wait_for(self._process_stream_messages(stream_id, context), global_config.chat.thinking_timeout) process_task = asyncio.create_task(
self._process_stream_messages(stream_id, context)
)
activity_update_interval = 30.0 # 每30秒更新一次
while not process_task.done():
try:
# 等待任务完成或超时
await asyncio.wait_for(
asyncio.shield(process_task),
timeout=activity_update_interval
)
except asyncio.TimeoutError:
# 任务仍在运行,更新活动时间
self._stream_last_activity[stream_id] = time.time()
logger.debug(f"🔄 [流工作器] stream={stream_id[:8]}, 处理中,更新活动时间")
return await process_task
success = await asyncio.wait_for(
process_with_activity_update(),
global_config.chat.thinking_timeout
)
except asyncio.TimeoutError: except asyncio.TimeoutError:
logger.warning(f"⏱️ [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 处理超时") logger.warning(f"⏱️ [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 处理超时")
success = False success = False
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count}, chatter处理完成, success={success}")
# 更新统计 # 更新统计
self.stats["total_process_cycles"] += 1 self.stats["total_process_cycles"] += 1
if success: if success:
@@ -265,9 +370,10 @@ class StreamLoopManager:
await asyncio.sleep(0.1) await asyncio.sleep(0.1)
else: else:
self.stats["total_failures"] += 1 self.stats["total_failures"] += 1
logger.warning(f"❌ [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 处理失败") logger.debug(f"❌ [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 处理失败")
# 5. 计算下次检查间隔 # 5. 计算下次检查间隔
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count}, 计算间隔...")
interval = await self._calculate_interval(stream_id, has_messages) interval = await self._calculate_interval(stream_id, has_messages)
# 6. sleep等待下次检查 # 6. sleep等待下次检查
@@ -276,7 +382,22 @@ class StreamLoopManager:
if last_interval is None or abs(interval - last_interval) > 0.01: if last_interval is None or abs(interval - last_interval) > 0.01:
logger.info(f"{stream_id} 等待周期变化: {interval:.2f}s") logger.info(f"{stream_id} 等待周期变化: {interval:.2f}s")
self._last_intervals[stream_id] = interval self._last_intervals[stream_id] = interval
await asyncio.sleep(interval)
loop_duration = time.time() - loop_start_time
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count} 完成, 耗时={loop_duration:.2f}s, 即将sleep {interval:.2f}s")
# 使用分段sleep每隔一段时间更新活动时间避免死锁检测误报
# 当间隔较长时(如等待用户回复),分段更新活动时间
remaining_sleep = interval
activity_update_interval = 30.0 # 每30秒更新一次活动时间
while remaining_sleep > 0:
sleep_chunk = min(remaining_sleep, activity_update_interval)
await asyncio.sleep(sleep_chunk)
remaining_sleep -= sleep_chunk
# 更新活动时间,表明流仍在正常运行(只是在等待)
self._stream_last_activity[stream_id] = time.time()
logger.debug(f"🔍 [流工作器] stream={stream_id[:8]}, 循环#{loop_count} sleep结束, 开始下一循环")
except asyncio.CancelledError: except asyncio.CancelledError:
logger.info(f"🛑 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 被取消") logger.info(f"🛑 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 被取消")
@@ -298,6 +419,9 @@ class StreamLoopManager:
# 清理间隔记录 # 清理间隔记录
self._last_intervals.pop(stream_id, None) self._last_intervals.pop(stream_id, None)
# 清理活动跟踪
self._stream_last_activity.pop(stream_id, None)
logger.info(f"🏁 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 循环结束") logger.info(f"🏁 [流工作器] stream={stream_id[:8]}, 任务ID={task_id}, 循环结束")
@@ -355,8 +479,8 @@ class StreamLoopManager:
# 🔒 防止并发处理:如果已经在处理中,直接返回 # 🔒 防止并发处理:如果已经在处理中,直接返回
if context.is_chatter_processing: if context.is_chatter_processing:
logger.warning(f"🔒 [并发保护] stream={stream_id[:8]}, Chatter 正在处理中,跳过本次处理请求") logger.debug(f"🔒 [并发保护] stream={stream_id[:8]}, Chatter 正在处理中,跳过本次处理请求")
return False return True # 返回 True,这是正常的保护机制,不是失败
# 设置处理状态为正在处理 # 设置处理状态为正在处理
self._set_stream_processing_status(stream_id, True) self._set_stream_processing_status(stream_id, True)
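A self-contained sketch of the heartbeat pattern used by process_with_activity_update above: shield the real work from wait_for's cancellation and refresh the last-activity timestamp each time the short timeout fires, so the deadlock detector sees a slow-but-alive stream (names here are illustrative):
import asyncio
import time

last_activity: dict[str, float] = {}

async def slow_job() -> str:
    await asyncio.sleep(3)          # stands in for _process_stream_messages
    return "done"

async def run_with_heartbeat(stream_id: str, chunk: float = 1.0) -> str:
    task = asyncio.create_task(slow_job())
    while not task.done():
        try:
            # shield() keeps the inner task alive when wait_for() times out
            await asyncio.wait_for(asyncio.shield(task), timeout=chunk)
        except asyncio.TimeoutError:
            last_activity["demo"] = time.time()   # still running, just slow
    return await task

print(asyncio.run(run_with_heartbeat("demo")))    # prints "done" after ~3 s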

View File

@@ -1,15 +1,10 @@
import asyncio import asyncio
import time
import traceback import traceback
from typing import Any, TYPE_CHECKING from typing import Any, TYPE_CHECKING
from src.chat.message_receive.chat_stream import get_chat_manager from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.timer_calculator import Timer
from src.common.data_models.database_data_model import DatabaseMessages from src.common.data_models.database_data_model import DatabaseMessages
from src.common.logger import get_logger from src.common.logger import get_logger
from src.config.config import global_config
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.apis import database_api, generator_api, message_api, send_api
from src.plugin_system.base.base_action import BaseAction from src.plugin_system.base.base_action import BaseAction
from src.plugin_system.base.component_types import ActionInfo, ComponentType from src.plugin_system.base.component_types import ActionInfo, ComponentType
from src.plugin_system.core.component_registry import component_registry from src.plugin_system.core.component_registry import component_registry
@@ -22,23 +17,19 @@ logger = get_logger("action_manager")
class ChatterActionManager: class ChatterActionManager:
""" """
动作管理器,用于管理各种类型的动作 动作管理器,用于管理和执行动作
现在统一使用新插件系统,简化了原有的新旧兼容逻辑。 职责:
- 加载和管理可用动作集
- 创建动作实例
- 执行动作(所有动作逻辑在 Action.execute() 中实现)
""" """
def __init__(self): def __init__(self):
"""初始化动作管理器""" """初始化动作管理器"""
# 当前正在使用的动作集合,在规划开始时加载
self._using_actions: dict[str, ActionInfo] = {} self._using_actions: dict[str, ActionInfo] = {}
self.chat_id: str | None = None self.chat_id: str | None = None
self.log_prefix: str = "ChatterActionManager" self.log_prefix: str = "ChatterActionManager"
# 批量存储支持
self._batch_storage_enabled = False
self._pending_actions = []
self._current_chat_id = None
async def load_actions(self, stream_id: str | None): async def load_actions(self, stream_id: str | None):
"""根据 stream_id 加载当前可用的动作""" """根据 stream_id 加载当前可用的动作"""
@@ -46,8 +37,6 @@ class ChatterActionManager:
self._using_actions = component_registry.get_default_actions(stream_id) self._using_actions = component_registry.get_default_actions(stream_id)
logger.debug(f"已为 stream '{stream_id}' 加载 {len(self._using_actions)} 个可用动作: {list(self._using_actions.keys())}") logger.debug(f"已为 stream '{stream_id}' 加载 {len(self._using_actions)} 个可用动作: {list(self._using_actions.keys())}")
# === 执行Action方法 ===
@staticmethod @staticmethod
def create_action( def create_action(
action_name: str, action_name: str,
@@ -72,12 +61,13 @@ class ChatterActionManager:
chat_stream: 聊天流 chat_stream: 聊天流
log_prefix: 日志前缀 log_prefix: 日志前缀
shutting_down: 是否正在关闭 shutting_down: 是否正在关闭
action_message: 目标消息
Returns: Returns:
Optional[BaseAction]: 创建的动作处理器实例如果动作名称未注册则返回None BaseAction | None: 创建的动作处理器实例
""" """
try: try:
# 获取组件类 - 明确指定查询Action类型 # 获取组件类
component_class: type[BaseAction] = component_registry.get_component_class( component_class: type[BaseAction] = component_registry.get_component_class(
action_name, ComponentType.ACTION action_name, ComponentType.ACTION
) # type: ignore ) # type: ignore
@@ -112,8 +102,6 @@ class ChatterActionManager:
except Exception as e: except Exception as e:
logger.error(f"创建Action实例失败 {action_name}: {e}") logger.error(f"创建Action实例失败 {action_name}: {e}")
import traceback
logger.error(traceback.format_exc()) logger.error(traceback.format_exc())
return None return None
@@ -121,17 +109,8 @@ class ChatterActionManager:
"""获取当前正在使用的动作集合""" """获取当前正在使用的动作集合"""
return self._using_actions.copy() return self._using_actions.copy()
# === Modify相关方法 ===
def remove_action_from_using(self, action_name: str) -> bool: def remove_action_from_using(self, action_name: str) -> bool:
""" """从当前使用的动作集中移除指定动作"""
从当前使用的动作集中移除指定动作
Args:
action_name: 动作名称
Returns:
bool: 移除是否成功
"""
if action_name not in self._using_actions: if action_name not in self._using_actions:
logger.warning(f"移除失败: 动作 {action_name} 不在当前使用的动作集中") logger.warning(f"移除失败: 动作 {action_name} 不在当前使用的动作集中")
return False return False
@@ -143,7 +122,6 @@ class ChatterActionManager:
async def restore_actions(self) -> None: async def restore_actions(self) -> None:
"""恢复到当前 stream_id 的默认动作集""" """恢复到当前 stream_id 的默认动作集"""
actions_to_restore = list(self._using_actions.keys()) actions_to_restore = list(self._using_actions.keys())
# 使用 self.chat_id 来恢复当前上下文的动作
await self.load_actions(self.chat_id) await self.load_actions(self.chat_id)
logger.debug(f"恢复动作集: 从 {actions_to_restore} 恢复到 stream '{self.chat_id}' 的默认动作集 {list(self._using_actions.keys())}") logger.debug(f"恢复动作集: 从 {actions_to_restore} 恢复到 stream '{self.chat_id}' 的默认动作集 {list(self._using_actions.keys())}")
@@ -159,31 +137,33 @@ class ChatterActionManager:
clear_unread_messages: bool = True, clear_unread_messages: bool = True,
) -> Any: ) -> Any:
""" """
执行单个动作的通用函数 执行单个动作
所有动作逻辑都在 BaseAction.execute() 中实现
Args: Args:
action_name: 动作名称 action_name: 动作名称
chat_id: 聊天id chat_id: 聊天ID
target_message: 目标消息 target_message: 目标消息
reasoning: 执行理由 reasoning: 执行理由
action_data: 动作数据 action_data: 动作数据
thinking_id: 思考ID thinking_id: 思考ID
log_prefix: 日志前缀 log_prefix: 日志前缀
clear_unread_messages: 是否清除未读消息
Returns: Returns:
执行结果 执行结果字典
""" """
assert global_config is not None assert global_config is not None
chat_stream = None chat_stream = None
try: try:
logger.debug(f"🎯 [ActionManager] execute_action接收到 target_message: {target_message}") # 获取 chat_stream
# 通过chat_id获取chat_stream
chat_manager = get_chat_manager() chat_manager = get_chat_manager()
chat_stream = await chat_manager.get_stream(chat_id) chat_stream = await chat_manager.get_stream(chat_id)
if not chat_stream: if not chat_stream:
logger.error(f"{log_prefix} 无法找到chat_id对应的chat_stream: {chat_id}") logger.error(f"{log_prefix} 无法找到 chat_stream: {chat_id}")
return { return {
"action_type": action_name, "action_type": action_name,
"success": False, "success": False,
@@ -191,182 +171,75 @@ class ChatterActionManager:
"error": "chat_stream not found", "error": "chat_stream not found",
} }
# 设置正在回复的状态 # 设置正在处理的状态
chat_stream.context.is_replying = True chat_stream.context.is_replying = True
if action_name == "no_action": # no_action / do_nothing 特殊处理
return {"action_type": "no_action", "success": True, "reply_text": "", "command": ""} if action_name in ("no_action", "do_nothing"):
return {"action_type": action_name, "success": True, "reply_text": ""}
if action_name == "no_reply": # 创建并执行动作
# 直接处理no_reply逻辑不再通过动作系统 action_handler = self.create_action(
reason = reasoning or "选择不回复" action_name=action_name,
logger.info(f"{log_prefix} 选择不回复,原因: {reason}") action_data=action_data or {},
reasoning=reasoning,
# 存储no_reply信息到数据库支持批量存储 cycle_timers={},
if self._batch_storage_enabled: thinking_id=thinking_id or "",
self.add_action_to_batch( chat_stream=chat_stream,
action_name="no_reply", log_prefix=log_prefix or self.log_prefix,
action_data={"reason": reason}, action_message=target_message,
thinking_id=thinking_id or "", )
action_done=True,
action_build_into_prompt=False,
action_prompt_display=reason,
)
else:
asyncio.create_task(database_api.store_action_info(
chat_stream=chat_stream,
action_build_into_prompt=False,
action_prompt_display=reason,
action_done=True,
thinking_id=thinking_id or "",
action_data={"reason": reason},
action_name="no_reply",
))
return {"action_type": "no_reply", "success": True, "reply_text": "", "command": ""}
elif action_name != "reply" and action_name != "respond" and action_name != "no_action":
# 执行普通动作
success, reply_text, command = await self._handle_action(
chat_stream,
action_name,
reasoning,
action_data or {},
{}, # cycle_timers
thinking_id,
target_message,
)
# 记录执行的动作到目标消息
if success:
asyncio.create_task(self._record_action_to_message(chat_stream, action_name, target_message, action_data))
# 重置打断计数
await self._reset_interruption_count_after_action(chat_stream.stream_id)
if not action_handler:
logger.error(f"{log_prefix} 创建动作处理器失败: {action_name}")
return { return {
"action_type": action_name, "action_type": action_name,
"success": success, "success": False,
"reply_text": reply_text, "reply_text": "",
"command": command, "error": f"Failed to create action handler: {action_name}",
} }
else:
# 检查目标消息是否为表情包消息以及配置是否允许回复表情包
if target_message and getattr(target_message, "is_emoji", False):
# 如果是表情包消息且配置不允许回复表情包,则跳过回复
if not getattr(global_config.chat, "allow_reply_to_emoji", True):
logger.info(f"{log_prefix} 目标消息为表情包且配置不允许回复表情包,跳过回复")
return {"action_type": action_name, "success": True, "reply_text": "", "skip_reason": "emoji_not_allowed"}
# 生成回复 (reply 或 respond) # 执行动作
# reply: 针对单条消息的回复,使用 s4u 模板 success, reply_text = await action_handler.handle_action()
# respond: 对未读消息的统一回应,使用 normal 模板
try:
# 根据动作类型确定提示词模式
prompt_mode = "s4u" if action_name == "reply" else "normal"
# 将prompt_mode传递给generate_reply # 记录动作到消息并存储动作信息
action_data_with_mode = (action_data or {}).copy() if success:
action_data_with_mode["prompt_mode"] = prompt_mode asyncio.create_task(self._record_action_to_message(chat_stream, action_name, target_message, action_data))
asyncio.create_task(self._reset_interruption_count(chat_stream.stream_id))
# 只传递当前正在执行的动作,而不是所有可用动作 # 统一存储动作信息
# 这样可以让LLM明确知道"已决定执行X动作",而不是"有这些动作可用" asyncio.create_task(
current_action_info = self._using_actions.get(action_name) self._store_action_info(
current_actions: dict[str, Any] = {action_name: current_action_info} if current_action_info else {} action_handler=action_handler,
action_name=action_name,
# 附加目标消息信息(如果存在) reply_text=reply_text,
if target_message: target_message=target_message,
# 提取目标消息的关键信息
target_msg_info = {
"message_id": getattr(target_message, "message_id", ""),
"sender": getattr(target_message.user_info, "user_nickname", "") if hasattr(target_message, "user_info") else "",
"content": getattr(target_message, "processed_plain_text", ""),
"time": getattr(target_message, "time", 0),
}
current_actions["_target_message"] = target_msg_info
success, response_set, _ = await generator_api.generate_reply(
chat_stream=chat_stream,
reply_message=target_message,
action_data=action_data_with_mode,
available_actions=current_actions, # type: ignore
enable_tool=global_config.tool.enable_tool,
request_type="chat.replyer",
from_plugin=False,
) )
if not success or not response_set: )
# 安全地获取 processed_plain_text
if target_message:
msg_text = target_message.processed_plain_text or "未知消息"
else:
msg_text = "未知消息"
logger.info(f"{msg_text} 的回复生成失败") return {
return {"action_type": action_name, "success": False, "reply_text": "", "loop_info": None} "action_type": action_name,
except asyncio.CancelledError: "success": success,
logger.debug(f"{log_prefix} 并行执行:回复生成任务已被取消") "reply_text": reply_text,
return {"action_type": action_name, "success": False, "reply_text": "", "loop_info": None} }
# 从action_data中提取should_quote_reply参数
should_quote_reply = None
if action_data and isinstance(action_data, dict):
should_quote_reply = action_data.get("should_quote_reply", None)
# respond动作默认不引用回复保持对话流畅
if action_name == "respond" and should_quote_reply is None:
should_quote_reply = False
async def _after_reply():
# 发送并存储回复
reply_text, cycle_timers_reply = await self._send_and_store_reply(
chat_stream,
response_set,
asyncio.get_event_loop().time(),
target_message,
{}, # cycle_timers
thinking_id,
[], # actions
should_quote_reply, # 传递should_quote_reply参数
)
# 记录回复动作到目标消息
await self._record_action_to_message(chat_stream, action_name, target_message, action_data)
# 回复成功,重置打断计数
await self._reset_interruption_count_after_action(chat_stream.stream_id)
return reply_text
asyncio.create_task(_after_reply())
return {"action_type": action_name, "success": True}
except Exception as e: except Exception as e:
logger.error(f"{log_prefix} 执行动作时出错: {e}") logger.error(f"{log_prefix} 执行动作时出错: {e}")
logger.error(f"{log_prefix} 错误信息: {traceback.format_exc()}") logger.error(traceback.format_exc())
return { return {
"action_type": action_name, "action_type": action_name,
"success": False, "success": False,
"reply_text": "", "reply_text": "",
"loop_info": None,
"error": str(e), "error": str(e),
} }
finally: finally:
# 确保重置正在回复的状态
if chat_stream: if chat_stream:
chat_stream.context.is_replying = False chat_stream.context.is_replying = False
async def _record_action_to_message(self, chat_stream, action_name, target_message, action_data): async def _record_action_to_message(self, chat_stream, action_name: str, target_message, action_data: dict | None):
""" """记录执行的动作到目标消息"""
记录执行的动作到目标消息中
Args:
chat_stream: ChatStream实例
action_name: 动作名称
target_message: 目标消息
action_data: 动作数据
"""
try: try:
from src.chat.message_manager.message_manager import message_manager from src.chat.message_manager.message_manager import message_manager
# 获取目标消息ID
target_message_id = None target_message_id = None
if target_message: if target_message:
target_message_id = target_message.message_id target_message_id = target_message.message_id
@@ -374,362 +247,66 @@ class ChatterActionManager:
target_message_id = action_data.get("target_message_id") target_message_id = action_data.get("target_message_id")
if not target_message_id: if not target_message_id:
logger.debug(f"无法获取目标消息ID动作: {action_name}")
return return
# 通过message_manager更新消息的动作记录并刷新focus_energy
await message_manager.add_action( await message_manager.add_action(
stream_id=chat_stream.stream_id, message_id=target_message_id, action=action_name stream_id=chat_stream.stream_id,
message_id=target_message_id,
action=action_name,
) )
logger.debug(f"已记录动作 {action_name} 到消息 {target_message_id} 并更新focus_energy") logger.debug(f"已记录动作 {action_name} 到消息 {target_message_id}")
except Exception as e: except Exception as e:
logger.error(f"记录动作到消息失败: {e}") logger.error(f"记录动作到消息失败: {e}")
# 不抛出异常,避免影响主要功能
async def _reset_interruption_count_after_action(self, stream_id: str):
"""在动作执行成功后重置打断计数"""
async def _reset_interruption_count(self, stream_id: str):
"""重置打断计数"""
try: try:
from src.plugin_system.apis.chat_api import get_chat_manager
chat_manager = get_chat_manager() chat_manager = get_chat_manager()
chat_stream = await chat_manager.get_stream(stream_id) chat_stream = await chat_manager.get_stream(stream_id)
if chat_stream: if chat_stream and chat_stream.context.interruption_count > 0:
context = chat_stream.context old_count = chat_stream.context.interruption_count
if context.interruption_count > 0: await chat_stream.context.reset_interruption_count()
old_count = context.interruption_count logger.debug(f"重置打断计数: {old_count} -> 0")
# old_afc_adjustment = context.context.get_afc_threshold_adjustment()
await context.reset_interruption_count()
logger.debug(
f"动作执行成功,重置聊天流 {stream_id} 的打断计数: {old_count} -> 0"
)
except Exception as e: except Exception as e:
logger.warning(f"重置打断计数时出错: {e}") logger.warning(f"重置打断计数时出错: {e}")
async def _handle_action( async def _store_action_info(
self, chat_stream, action, reasoning, action_data, cycle_timers, thinking_id, action_message
) -> tuple[bool, str, str]:
"""
处理具体的动作执行
Args:
chat_stream: ChatStream实例
action: 动作名称
reasoning: 执行理由
action_data: 动作数据
cycle_timers: 循环计时器
thinking_id: 思考ID
action_message: 动作消息
Returns:
tuple: (执行是否成功, 回复文本, 命令文本)
功能说明:
- 创建对应的动作处理器
- 执行动作并捕获异常
- 返回执行结果供上级方法整合
"""
if not chat_stream:
return False, "", ""
try:
# 创建动作处理器
action_handler = self.create_action(
action_name=action,
action_data=action_data,
reasoning=reasoning,
cycle_timers=cycle_timers,
thinking_id=thinking_id,
chat_stream=chat_stream,
log_prefix=self.log_prefix,
action_message=action_message,
)
if not action_handler:
# 动作处理器创建失败,尝试回退机制
logger.warning(f"{self.log_prefix} 创建动作处理器失败: {action},尝试回退方案")
# 获取当前可用的动作
available_actions = self.get_using_actions()
fallback_action = None
# 回退优先级reply > 第一个可用动作
if "reply" in available_actions:
fallback_action = "reply"
elif available_actions:
fallback_action = next(iter(available_actions.keys()))
if fallback_action and fallback_action != action:
logger.info(f"{self.log_prefix} 使用回退动作: {fallback_action}")
action_handler = self.create_action(
action_name=fallback_action,
action_data=action_data,
reasoning=f"原动作'{action}'不可用,自动回退。{reasoning}",
cycle_timers=cycle_timers,
thinking_id=thinking_id,
chat_stream=chat_stream,
log_prefix=self.log_prefix,
action_message=action_message,
)
if not action_handler:
logger.error(f"{self.log_prefix} 回退方案也失败,无法创建任何动作处理器")
return False, "", ""
# 执行动作
success, reply_text = await action_handler.handle_action()
return success, reply_text, ""
except Exception as e:
logger.error(f"{self.log_prefix} 处理{action}时出错: {e}")
traceback.print_exc()
return False, "", ""
async def _send_and_store_reply(
self,
chat_stream: "ChatStream",
response_set,
loop_start_time,
action_message,
cycle_timers: dict[str, float],
thinking_id,
actions,
should_quote_reply: bool | None = None,
) -> tuple[str, dict[str, float]]:
"""
发送并存储回复信息
Args:
chat_stream: ChatStream实例
response_set: 回复内容集合
loop_start_time: 循环开始时间
action_message: 动作消息
cycle_timers: 循环计时器
thinking_id: 思考ID
actions: 动作列表
should_quote_reply: 是否应该引用回复原消息None表示自动决定
Returns:
Tuple[Dict[str, Any], str, Dict[str, float]]: 循环信息, 回复文本, 循环计时器
"""
# 发送回复
with Timer("回复发送", cycle_timers):
reply_text = await self.send_response(
chat_stream, response_set, loop_start_time, action_message, should_quote_reply
)
# 存储reply action信息
person_info_manager = get_person_info_manager()
# 获取 platform如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
if action_message:
platform = action_message.chat_info.platform
user_id = action_message.user_info.user_id
else:
platform = getattr(chat_stream, "platform", "unknown")
user_id = ""
# 获取用户信息并生成回复提示
person_id = person_info_manager.get_person_id(
platform,
user_id,
)
person_name = await person_info_manager.get_value(person_id, "person_name")
action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
# 存储动作信息到数据库(支持批量存储)
if self._batch_storage_enabled:
self.add_action_to_batch(
action_name="reply",
action_data={"reply_text": reply_text},
thinking_id=thinking_id or "",
action_done=True,
action_build_into_prompt=False,
action_prompt_display=action_prompt_display,
)
else:
await database_api.store_action_info(
chat_stream=chat_stream,
action_build_into_prompt=False,
action_prompt_display=action_prompt_display,
action_done=True,
thinking_id=thinking_id,
action_data={"reply_text": reply_text},
action_name="reply",
)
return reply_text, cycle_timers
async def send_response(
self, chat_stream, reply_set, thinking_start_time, message_data, should_quote_reply: bool | None = None
) -> str:
"""
发送回复内容的具体实现
Args:
chat_stream: ChatStream实例
reply_set: 回复内容集合,包含多个回复段
thinking_start_time: 思考开始时间
message_data: 消息数据
should_quote_reply: 是否应该引用回复原消息None表示自动决定
Returns:
str: 完整的回复文本
功能说明:
- 检查是否有新消息需要回复
- 处理主动思考的"沉默"决定
- 根据消息数量决定是否添加回复引用
- 逐段发送回复内容,支持打字效果
- 正确处理元组格式的回复段
"""
current_time = time.time()
# 计算新消息数量
await message_api.count_new_messages(
chat_id=chat_stream.stream_id, start_time=thinking_start_time, end_time=current_time
)
# 根据新消息数量决定是否需要引用回复
reply_text = ""
# 检查是否为主动思考消息
if message_data:
is_proactive_thinking = getattr(message_data, "message_type", None) == "proactive_thinking"
else:
is_proactive_thinking = True
logger.debug(f"[send_response] message_data: {message_data}")
first_replied = False
for reply_seg in reply_set:
# 调试日志验证reply_seg的格式
logger.debug(f"Processing reply_seg type: {type(reply_seg)}, content: {reply_seg}")
# 修正:正确处理元组格式 (格式为: (type, content))
if isinstance(reply_seg, tuple) and len(reply_seg) >= 2:
_, data = reply_seg
else:
# 向下兼容:如果已经是字符串,则直接使用
data = str(reply_seg)
if isinstance(data, list):
data = "".join(map(str, data))
reply_text += data
# 如果是主动思考且内容为"沉默",则不发送
if is_proactive_thinking and data.strip() == "沉默":
logger.info(f"{self.log_prefix} 主动思考决定保持沉默,不发送消息")
continue
# 发送第一段回复
if not first_replied:
# 决定是否引用回复
is_private_chat = not bool(chat_stream.group_info)
# 如果明确指定了should_quote_reply则使用指定值
if should_quote_reply is not None:
set_reply_flag = should_quote_reply and bool(message_data)
logger.debug(
f"📤 [ActionManager] 使用planner指定的引用设置: should_quote_reply={should_quote_reply}"
)
else:
# 否则使用默认逻辑:默认不引用,让对话更流畅自然
set_reply_flag = False
logger.debug(
f"📤 [ActionManager] 使用默认引用逻辑: 默认不引用(is_private={is_private_chat})"
)
logger.debug(
f"📤 [ActionManager] 准备发送第一段回复。message_data: {message_data}, set_reply: {set_reply_flag}"
)
await send_api.text_to_stream(
text=data,
stream_id=chat_stream.stream_id,
reply_to_message=message_data,
set_reply=set_reply_flag,
typing=False,
)
first_replied = True
else:
# 发送后续回复
await send_api.text_to_stream(
text=data,
stream_id=chat_stream.stream_id,
reply_to_message=None,
set_reply=False,
typing=True,
)
return reply_text
def enable_batch_storage(self, chat_id: str):
"""启用批量存储模式"""
self._batch_storage_enabled = True
self._current_chat_id = chat_id
self._pending_actions.clear()
logger.debug(f"已启用批量存储模式chat_id: {chat_id}")
def disable_batch_storage(self):
"""禁用批量存储模式"""
self._batch_storage_enabled = False
self._current_chat_id = None
self._pending_actions = [] # 清空队列
logger.debug("已禁用批量存储模式")
def add_action_to_batch(
self, self,
action_handler: BaseAction,
action_name: str, action_name: str,
action_data: dict, reply_text: str,
thinking_id: str = "", target_message: DatabaseMessages | None,
action_done: bool = True,
action_build_into_prompt: bool = False,
action_prompt_display: str = "",
): ):
"""添加动作到批量存储列表""" """统一存储动作信息到数据库"""
if not self._batch_storage_enabled:
return False
action_record = {
"action_name": action_name,
"action_data": action_data,
"thinking_id": thinking_id,
"action_done": action_done,
"action_build_into_prompt": action_build_into_prompt,
"action_prompt_display": action_prompt_display,
"timestamp": time.time(),
}
self._pending_actions.append(action_record)
logger.debug(f"已添加动作到批量存储列表: {action_name} (当前待处理: {len(self._pending_actions)} 个)")
return True
async def flush_batch_storage(self, chat_stream):
"""批量存储所有待处理的动作记录"""
if not self._pending_actions:
logger.debug("没有待处理的动作需要批量存储")
return
try: try:
logger.info(f"开始批量存储 {len(self._pending_actions)} 个动作记录") from src.person_info.person_info import get_person_info_manager
from src.plugin_system.apis import database_api
# 批量存储所有动作 # 构建 action_prompt_display
stored_count = 0 action_prompt_display = ""
for action_data in self._pending_actions: if reply_text:
try: person_info_manager = get_person_info_manager()
result = await database_api.store_action_info( if target_message:
chat_stream=chat_stream, platform = target_message.chat_info.platform
action_name=action_data.get("action_name", ""), user_id = target_message.user_info.user_id
action_data=action_data.get("action_data", {}), person_id = person_info_manager.get_person_id(platform, user_id)
action_done=action_data.get("action_done", True), person_name = await person_info_manager.get_value(person_id, "person_name")
action_build_into_prompt=action_data.get("action_build_into_prompt", False), action_prompt_display = f"你对{person_name}进行了回复:{reply_text}"
action_prompt_display=action_data.get("action_prompt_display", ""), else:
thinking_id=action_data.get("thinking_id", ""), action_prompt_display = f"统一回应:{reply_text}"
)
if result:
stored_count += 1
except Exception as e:
logger.error(f"存储单个动作记录失败: {e}")
logger.info(f"批量存储完成: 成功存储 {stored_count}/{len(self._pending_actions)} 个动作记录") # 存储动作信息
await database_api.store_action_info(
# 清空待处理列表 chat_stream=action_handler.chat_stream,
self._pending_actions.clear() action_build_into_prompt=False,
action_prompt_display=action_prompt_display,
action_done=True,
thinking_id=action_handler.thinking_id,
action_data={"reply_text": reply_text} if reply_text else action_handler.action_data,
action_name=action_name,
)
logger.debug(f"已存储动作信息: {action_name}")
except Exception as e: except Exception as e:
logger.error(f"批量存储动作记录时发生错误: {e}") logger.error(f"存储动作信息失败: {e}")

View File

@@ -58,6 +58,7 @@ class ActionModifier:
async def modify_actions( async def modify_actions(
self, self,
message_content: str = "", message_content: str = "",
chatter_name: str = "",
): # sourcery skip: use-named-expression ): # sourcery skip: use-named-expression
""" """
动作修改流程,整合传统观察处理和新的激活类型判定 动作修改流程,整合传统观察处理和新的激活类型判定
@@ -67,6 +68,10 @@ class ActionModifier:
2. 基于激活类型的智能动作判定,最终确定可用动作集 2. 基于激活类型的智能动作判定,最终确定可用动作集
处理后ActionManager 将包含最终的可用动作集,供规划器直接使用 处理后ActionManager 将包含最终的可用动作集,供规划器直接使用
Args:
message_content: 消息内容
chatter_name: 当前使用的 Chatter 名称,用于过滤只允许特定 Chatter 使用的动作
""" """
assert global_config is not None assert global_config is not None
# 初始化log_prefix # 初始化log_prefix
@@ -84,13 +89,14 @@ class ActionModifier:
logger.debug(f"{self.log_prefix}开始完整动作修改流程") logger.debug(f"{self.log_prefix}开始完整动作修改流程")
removals_s0: list[tuple[str, str]] = [] # 第0阶段聊天类型和Chatter过滤
removals_s1: list[tuple[str, str]] = [] removals_s1: list[tuple[str, str]] = []
removals_s2: list[tuple[str, str]] = [] removals_s2: list[tuple[str, str]] = []
removals_s3: list[tuple[str, str]] = [] removals_s3: list[tuple[str, str]] = []
all_actions = self.action_manager.get_using_actions() all_actions = self.action_manager.get_using_actions()
# === 第0阶段根据聊天类型过滤动作 === # === 第0阶段根据聊天类型和Chatter过滤动作 ===
from src.chat.utils.utils import get_chat_type_and_target_info from src.chat.utils.utils import get_chat_type_and_target_info
from src.plugin_system.base.component_types import ChatType, ComponentType from src.plugin_system.base.component_types import ChatType, ComponentType
from src.plugin_system.core.component_registry import component_registry from src.plugin_system.core.component_registry import component_registry
@@ -99,26 +105,35 @@ class ActionModifier:
is_group_chat, _ = await get_chat_type_and_target_info(self.chat_id) is_group_chat, _ = await get_chat_type_and_target_info(self.chat_id)
all_registered_actions = component_registry.get_components_by_type(ComponentType.ACTION) all_registered_actions = component_registry.get_components_by_type(ComponentType.ACTION)
chat_type_removals = []
for action_name in list(all_actions.keys()): for action_name in list(all_actions.keys()):
if action_name in all_registered_actions: if action_name in all_registered_actions:
action_info = all_registered_actions[action_name] action_info = all_registered_actions[action_name]
# 检查聊天类型限制
chat_type_allow = getattr(action_info, "chat_type_allow", ChatType.ALL) chat_type_allow = getattr(action_info, "chat_type_allow", ChatType.ALL)
should_keep_chat_type = (
# 检查是否符合聊天类型限制
should_keep = (
chat_type_allow == ChatType.ALL chat_type_allow == ChatType.ALL
or (chat_type_allow == ChatType.GROUP and is_group_chat) or (chat_type_allow == ChatType.GROUP and is_group_chat)
or (chat_type_allow == ChatType.PRIVATE and not is_group_chat) or (chat_type_allow == ChatType.PRIVATE and not is_group_chat)
) )
if not should_keep: if not should_keep_chat_type:
chat_type_removals.append((action_name, f"不支持{'群聊' if is_group_chat else '私聊'}")) removals_s0.append((action_name, f"不支持{'群聊' if is_group_chat else '私聊'}"))
self.action_manager.remove_action_from_using(action_name) self.action_manager.remove_action_from_using(action_name)
continue
# 检查 Chatter 限制
chatter_allow = getattr(action_info, "chatter_allow", [])
if chatter_allow and chatter_name:
# 如果设置了 chatter_allow 且提供了 chatter_name则检查是否匹配
if chatter_name not in chatter_allow:
removals_s0.append((action_name, f"仅限 {', '.join(chatter_allow)} 使用"))
self.action_manager.remove_action_from_using(action_name)
continue
if chat_type_removals: if removals_s0:
logger.info(f"{self.log_prefix} 第0阶段根据聊天类型过滤 - 移除了 {len(chat_type_removals)} 个动作") logger.info(f"{self.log_prefix} 第0阶段类型/Chatter过滤 - 移除了 {len(removals_s0)} 个动作")
for action_name, reason in chat_type_removals: for action_name, reason in removals_s0:
logger.debug(f"{self.log_prefix} - 移除 {action_name}: {reason}") logger.debug(f"{self.log_prefix} - 移除 {action_name}: {reason}")
message_list_before_now_half = await get_raw_msg_before_timestamp_with_chat( message_list_before_now_half = await get_raw_msg_before_timestamp_with_chat(
@@ -174,7 +189,7 @@ class ActionModifier:
logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}") logger.debug(f"{self.log_prefix}阶段三移除动作: {action_name},原因: {reason}")
# === 统一日志记录 === # === 统一日志记录 ===
all_removals = chat_type_removals + removals_s1 + removals_s2 + removals_s3 all_removals = removals_s0 + removals_s1 + removals_s2 + removals_s3
removals_summary: str = "" removals_summary: str = ""
if all_removals: if all_removals:
removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals]) removals_summary = " | ".join([f"{name}({reason})" for name, reason in all_removals])

View File

@@ -166,18 +166,18 @@ class StreamContext(BaseDataModel):
"platform": message.chat_info.platform, "platform": message.chat_info.platform,
"stream_id": self.stream_id, "stream_id": self.stream_id,
} }
await unified_manager.add_message(message_dict) # type: ignore await unified_manager.add_message(message_dict)
logger.debug(f"<EFBFBD><EFBFBD>Ϣ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ӵ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ϵͳ: {message.message_id}") logger.debug(f"消息已添加到统一记忆系统: {message.message_id}")
except Exception as e: except Exception as e:
logger.error(f"<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ϣ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ϵͳʧ<EFBFBD><EFBFBD>: {e}") logger.error(f"添加消息到统一记忆系统失败: {e}")
return True return True
except Exception as e: except Exception as e:
logger.error(f"<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ϣ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>ʧ<EFBFBD><EFBFBD> {self.stream_id}: {e}") logger.error(f"添加消息到上下文失败 {self.stream_id}: {e}")
return False return False
async def update_message(self, message_id: str, updates: dict[str, Any]) -> bool: async def update_message(self, message_id: str, updates: dict[str, Any]) -> bool:
"""<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>е<EFBFBD><EFBFBD><EFBFBD>Ϣ""" """更新上下文中的消息信息"""
try: try:
for message in self.unread_messages: for message in self.unread_messages:
if str(message.message_id) == str(message_id): if str(message.message_id) == str(message_id):
@@ -199,10 +199,10 @@ class StreamContext(BaseDataModel):
message.should_reply = updates["should_reply"] message.should_reply = updates["should_reply"]
break break
logger.debug(f"<EFBFBD><EFBFBD><EFBFBD>µ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ϣ: {self.stream_id}/{message_id}") logger.debug(f"更新消息信息: {self.stream_id}/{message_id}")
return True return True
except Exception as e: except Exception as e:
logger.error(f"<EFBFBD><EFBFBD><EFBFBD>µ<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>Ϣʧ<EFBFBD><EFBFBD> {self.stream_id}/{message_id}: {e}") logger.error(f"更新消息信息失败 {self.stream_id}/{message_id}: {e}")
return False return False
def add_action_to_message(self, message_id: str, action: str): def add_action_to_message(self, message_id: str, action: str):

View File

@@ -150,7 +150,6 @@ class CRUDBase(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached_dict = await cache.get(cache_key) cached_dict = await cache.get(cache_key)
if cached_dict is not None: if cached_dict is not None:
logger.debug(f"缓存命中: {cache_key}")
# 从字典恢复对象 # 从字典恢复对象
return _dict_to_model(self.model, cached_dict) return _dict_to_model(self.model, cached_dict)
@@ -195,7 +194,6 @@ class CRUDBase(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached_dict = await cache.get(cache_key) cached_dict = await cache.get(cache_key)
if cached_dict is not None: if cached_dict is not None:
logger.debug(f"缓存命中: {cache_key}")
# 从字典恢复对象 # 从字典恢复对象
return _dict_to_model(self.model, cached_dict) return _dict_to_model(self.model, cached_dict)
@@ -248,7 +246,6 @@ class CRUDBase(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached_dicts = await cache.get(cache_key) cached_dicts = await cache.get(cache_key)
if cached_dicts is not None: if cached_dicts is not None:
logger.debug(f"缓存命中: {cache_key}")
# 从字典列表恢复对象列表 # 从字典列表恢复对象列表
return [_dict_to_model(self.model, d) for d in cached_dicts] # type: ignore return [_dict_to_model(self.model, d) for d in cached_dicts] # type: ignore

View File

@@ -199,7 +199,6 @@ class QueryBuilder(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached_dicts = await cache.get(cache_key) cached_dicts = await cache.get(cache_key)
if cached_dicts is not None: if cached_dicts is not None:
logger.debug(f"缓存命中: {cache_key}")
dict_rows = [dict(row) for row in cached_dicts] dict_rows = [dict(row) for row in cached_dicts]
if as_dict: if as_dict:
return dict_rows return dict_rows
@@ -238,7 +237,6 @@ class QueryBuilder(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached_dict = await cache.get(cache_key) cached_dict = await cache.get(cache_key)
if cached_dict is not None: if cached_dict is not None:
logger.debug(f"缓存命中: {cache_key}")
row = dict(cached_dict) row = dict(cached_dict)
if as_dict: if as_dict:
return row return row
@@ -277,7 +275,6 @@ class QueryBuilder(Generic[T]):
cache = await get_cache() cache = await get_cache()
cached = await cache.get(cache_key) cached = await cache.get(cache_key)
if cached is not None: if cached is not None:
logger.debug(f"缓存命中: {cache_key}")
return cached return cached
# 构建count查询 # 构建count查询

View File

@@ -194,7 +194,6 @@ def cached(
cached_result = await cache.get(cache_key) cached_result = await cache.get(cache_key)
if cached_result is not None: if cached_result is not None:
logger.debug(f"缓存命中: {cache_key}")
return cached_result return cached_result
# 执行函数 # 执行函数

View File

@@ -645,6 +645,12 @@ DEFAULT_MODULE_COLORS = {
"context_web": "#5F5F00", # 深黄色 "context_web": "#5F5F00", # 深黄色
"gift_manager": "#D7005F", # 粉红色 "gift_manager": "#D7005F", # 粉红色
"prompt": "#875FFF", # 紫色mais4u的prompt "prompt": "#875FFF", # 紫色mais4u的prompt
# Kokoro Flow Chatter (KFC) 系统
"kfc_planner": "#b19cd9", # 淡紫色 - KFC 规划器
"kfc_replyer": "#b19cd9", # 淡紫色 - KFC 回复器
"kfc_chatter": "#b19cd9", # 淡紫色 - KFC 主模块
"kfc_unified": "#d7afff", # 柔和紫色 - KFC 统一模式
"kfc_proactive_thinker": "#d7afff", # 柔和紫色 - KFC 主动思考器
"super_chat_manager": "#AF005F", # 紫红色 "super_chat_manager": "#AF005F", # 紫红色
"watching": "#AF5F5F", # 深橙色 "watching": "#AF5F5F", # 深橙色
"offline_llm": "#303030", # 深灰色 "offline_llm": "#303030", # 深灰色
@@ -681,6 +687,15 @@ DEFAULT_MODULE_COLORS = {
"AioHTTP-Gemini客户端": "#5FD7FF", "AioHTTP-Gemini客户端": "#5FD7FF",
"napcat_adapter": "#5F87AF", # 柔和的灰蓝色,不刺眼且低调 "napcat_adapter": "#5F87AF", # 柔和的灰蓝色,不刺眼且低调
"event_manager": "#5FD7AF", # 柔和的蓝绿色,稍微醒目但不刺眼 "event_manager": "#5FD7AF", # 柔和的蓝绿色,稍微醒目但不刺眼
# Kokoro Flow Chatter (KFC) 相关 - 超融合架构专用颜色
"kokoro_flow_chatter": "#FF5FAF", # 粉紫色 - 主聊天器
"kokoro_prompt_generator": "#00D7FF", # 青色 - Prompt构建
"kokoro_action_executor": "#FFFF00", # 黄色 - 动作解析与执行
"kfc_context_builder": "#5FD7FF", # 蓝色 - 上下文构建
"kfc_session_manager": "#87D787", # 绿色 - 会话管理
"kfc_scheduler": "#D787AF", # 柔和粉色 - 调度器
"kfc_post_processor": "#5F87FF", # 蓝色 - 后处理
"kfc_unified": "#FF5FAF", # 粉色 - 统一模式
} }
DEFAULT_MODULE_ALIASES = { DEFAULT_MODULE_ALIASES = {
@@ -809,6 +824,15 @@ DEFAULT_MODULE_ALIASES = {
"db_migration": "数据库迁移", "db_migration": "数据库迁移",
"小彩蛋": "小彩蛋", "小彩蛋": "小彩蛋",
"AioHTTP-Gemini客户端": "AioHTTP-Gemini客户端", "AioHTTP-Gemini客户端": "AioHTTP-Gemini客户端",
# Kokoro Flow Chatter (KFC) 超融合架构相关
"kokoro_flow_chatter": "心流聊天",
"kokoro_prompt_generator": "KFC提示词",
"kokoro_action_executor": "KFC动作",
"kfc_context_builder": "KFC上下文",
"kfc_session_manager": "KFC会话",
"kfc_scheduler": "KFC调度",
"kfc_post_processor": "KFC后处理",
"kfc_unified": "KFC统一模式",
} }

View File

@@ -25,6 +25,7 @@ from src.config.official_configs import (
EmojiConfig, EmojiConfig,
ExperimentalConfig, ExperimentalConfig,
ExpressionConfig, ExpressionConfig,
KokoroFlowChatterConfig,
LPMMKnowledgeConfig, LPMMKnowledgeConfig,
MessageBusConfig, MessageBusConfig,
MemoryConfig, MemoryConfig,
@@ -425,6 +426,9 @@ class Config(ValidatedConfigBase):
proactive_thinking: ProactiveThinkingConfig = Field( proactive_thinking: ProactiveThinkingConfig = Field(
default_factory=lambda: ProactiveThinkingConfig(), description="主动思考配置" default_factory=lambda: ProactiveThinkingConfig(), description="主动思考配置"
) )
kokoro_flow_chatter: KokoroFlowChatterConfig = Field(
default_factory=lambda: KokoroFlowChatterConfig(), description="心流对话系统配置(私聊专用)"
)
plugin_http_system: PluginHttpSystemConfig = Field( plugin_http_system: PluginHttpSystemConfig = Field(
default_factory=lambda: PluginHttpSystemConfig(), description="插件HTTP端点系统配置" default_factory=lambda: PluginHttpSystemConfig(), description="插件HTTP端点系统配置"
) )

View File

@@ -888,3 +888,85 @@ class ProactiveThinkingConfig(ValidatedConfigBase):
# --- 新增:调试与监控 --- # --- 新增:调试与监控 ---
enable_statistics: bool = Field(default=True, description="是否启用统计功能(记录触发次数、决策分布等)") enable_statistics: bool = Field(default=True, description="是否启用统计功能(记录触发次数、决策分布等)")
log_decisions: bool = Field(default=False, description="是否记录每次决策的详细日志(用于调试)") log_decisions: bool = Field(default=False, description="是否记录每次决策的详细日志(用于调试)")
class KokoroFlowChatterProactiveConfig(ValidatedConfigBase):
"""
Kokoro Flow Chatter 主动思考子配置
设计哲学:主动行为源于内部状态和外部环境的自然反应,而非机械的限制。
她的主动是因为挂念、因为关心、因为想问候,而不是因为"任务"
"""
enabled: bool = Field(default=True, description="是否启用KFC的私聊主动思考")
# 1. 沉默触发器:当感到长久的沉默时,她可能会想说些什么
silence_threshold_seconds: int = Field(
default=7200, ge=60, le=86400,
description="用户沉默超过此时长可能触发主动思考默认2小时"
)
# 2. 关系门槛:她不会对不熟悉的人过于主动
min_affinity_for_proactive: float = Field(
default=0.3, ge=0.0, le=1.0,
description="需要达到最低好感度,她才会开始主动关心"
)
# 3. 频率呼吸:为了避免打扰,她的关心总是有间隔的
min_interval_between_proactive: int = Field(
default=1800, ge=0,
description="两次主动思考之间的最小间隔默认30分钟"
)
# 4. 自然问候:在特定的时间,她会像朋友一样送上问候
enable_morning_greeting: bool = Field(
default=True, description="是否启用早安问候 (例如: 8:00 - 9:00)"
)
enable_night_greeting: bool = Field(
default=True, description="是否启用晚安问候 (例如: 22:00 - 23:00)"
)
# 5. 勿扰时段:在这段时间内不会主动发起对话
quiet_hours_start: str = Field(
default="23:00", description="勿扰时段开始时间,格式: HH:MM"
)
quiet_hours_end: str = Field(
default="07:00", description="勿扰时段结束时间,格式: HH:MM"
)
# 6. 触发概率:每次检查时主动发起的概率
trigger_probability: float = Field(
default=0.3, ge=0.0, le=1.0,
description="主动思考触发概率0.0~1.0),用于避免过于频繁打扰"
)
class KokoroFlowChatterConfig(ValidatedConfigBase):
"""
Kokoro Flow Chatter 配置类 - 私聊专用心流对话系统
设计理念KFC不是独立人格它复用全局的人设、情感框架和回复模型,
只作为Bot核心人格在私聊中的一种特殊表现模式。
"""
# --- 总开关 ---
enable: bool = Field(
default=True,
description="开启后KFC将接管所有私聊消息关闭后私聊消息将由AFC处理"
)
# --- 核心行为配置 ---
max_wait_seconds_default: int = Field(
default=300, ge=30, le=3600,
description="默认的最大等待秒数AI发送消息后愿意等待用户回复的时间"
)
enable_continuous_thinking: bool = Field(
default=True,
description="是否在等待期间启用心理活动更新"
)
# --- 私聊专属主动思考配置 ---
proactive_thinking: KokoroFlowChatterProactiveConfig = Field(
default_factory=KokoroFlowChatterProactiveConfig,
description="私聊专属主动思考配置"
)
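The quiet-hours window above is given as two HH:MM strings and, with the defaults, spans midnight. The hunk does not show the consumer of these fields, so the following is only a sketch of how the overnight window could be evaluated; the helper name and its placement are invented for illustration.

from datetime import datetime, time as dtime


def in_quiet_hours(start: str = "23:00", end: str = "07:00", now: datetime | None = None) -> bool:
    """Return True if `now` falls inside the quiet window; handles windows that cross midnight."""
    now_t = (now or datetime.now()).time()
    start_t = dtime(*map(int, start.split(":")))
    end_t = dtime(*map(int, end.split(":")))
    if start_t <= end_t:
        # 同一天内的时段,例如 13:00 - 15:00
        return start_t <= now_t < end_t
    # 跨越午夜的时段,例如默认的 23:00 - 07:00
    return now_t >= start_t or now_t < end_t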

View File

@@ -1,9 +1,13 @@
""" """
向量存储层:基于 ChromaDB 的语义向量存储 向量存储层:基于 ChromaDB 的语义向量存储
注意ChromaDB 是同步库,所有操作都必须使用 asyncio.to_thread() 包装
以避免阻塞 asyncio 事件循环导致死锁。
""" """
from __future__ import annotations from __future__ import annotations
import asyncio
from pathlib import Path from pathlib import Path
from typing import Any from typing import Any
@@ -53,22 +57,30 @@ class VectorStore:
import chromadb import chromadb
from chromadb.config import Settings from chromadb.config import Settings
# 创建持久化客户端 # 创建持久化客户端 - 同步操作需要在线程中执行
self.client = chromadb.PersistentClient( def _create_client():
path=str(self.data_dir / "chroma"), return chromadb.PersistentClient(
settings=Settings( path=str(self.data_dir / "chroma"),
anonymized_telemetry=False, settings=Settings(
allow_reset=True, anonymized_telemetry=False,
), allow_reset=True,
) ),
)
self.client = await asyncio.to_thread(_create_client)
# 获取或创建集合 # 获取或创建集合 - 同步操作需要在线程中执行
self.collection = self.client.get_or_create_collection( def _get_or_create_collection():
name=self.collection_name, return self.client.get_or_create_collection(
metadata={"description": "Memory graph node embeddings"}, name=self.collection_name,
) metadata={"description": "Memory graph node embeddings"},
)
self.collection = await asyncio.to_thread(_get_or_create_collection)
logger.debug(f"ChromaDB 初始化完成,集合包含 {self.collection.count()} 个节点") # count() 也是同步操作
count = await asyncio.to_thread(self.collection.count)
logger.debug(f"ChromaDB 初始化完成,集合包含 {count} 个节点")
except Exception as e: except Exception as e:
logger.error(f"初始化 ChromaDB 失败: {e}") logger.error(f"初始化 ChromaDB 失败: {e}")
@@ -106,12 +118,16 @@ class VectorStore:
else: else:
metadata[key] = str(value) metadata[key] = str(value)
self.collection.add( # ChromaDB add() 是同步阻塞操作,必须在线程中执行
ids=[node.id], def _add_node():
embeddings=[node.embedding.tolist()], self.collection.add(
metadatas=[metadata], ids=[node.id],
documents=[node.content], # 文本内容用于检索 embeddings=[node.embedding.tolist()],
) metadatas=[metadata],
documents=[node.content],
)
await asyncio.to_thread(_add_node)
logger.debug(f"添加节点到向量存储: {node}") logger.debug(f"添加节点到向量存储: {node}")
@@ -155,12 +171,16 @@ class VectorStore:
metadata[key] = str(value) metadata[key] = str(value)
metadatas.append(metadata) metadatas.append(metadata)
self.collection.add( # ChromaDB add() 是同步阻塞操作,必须在线程中执行
ids=[n.id for n in valid_nodes], def _add_batch():
embeddings=[n.embedding.tolist() for n in valid_nodes], # type: ignore self.collection.add(
metadatas=metadatas, ids=[n.id for n in valid_nodes],
documents=[n.content for n in valid_nodes], embeddings=[n.embedding.tolist() for n in valid_nodes], # type: ignore
) metadatas=metadatas,
documents=[n.content for n in valid_nodes],
)
await asyncio.to_thread(_add_batch)
except Exception as e: except Exception as e:
logger.error(f"批量添加节点失败: {e}") logger.error(f"批量添加节点失败: {e}")
@@ -194,12 +214,15 @@ class VectorStore:
if node_types: if node_types:
where_filter = {"node_type": {"$in": [nt.value for nt in node_types]}} where_filter = {"node_type": {"$in": [nt.value for nt in node_types]}}
# 执行查询 # ChromaDB query() 是同步阻塞操作,必须在线程中执行
results = self.collection.query( def _query():
query_embeddings=[query_embedding.tolist()], return self.collection.query(
n_results=limit, query_embeddings=[query_embedding.tolist()],
where=where_filter, n_results=limit,
) where=where_filter,
)
results = await asyncio.to_thread(_query)
# 解析结果 # 解析结果
import orjson import orjson
@@ -360,7 +383,11 @@ class VectorStore:
raise RuntimeError("向量存储未初始化") raise RuntimeError("向量存储未初始化")
try: try:
result = self.collection.get(ids=[node_id], include=["metadatas", "embeddings"]) # ChromaDB get() 是同步阻塞操作,必须在线程中执行
def _get():
return self.collection.get(ids=[node_id], include=["metadatas", "embeddings"])
result = await asyncio.to_thread(_get)
# 修复:直接检查 ids 列表是否非空(避免 numpy 数组的布尔值歧义) # 修复:直接检查 ids 列表是否非空(避免 numpy 数组的布尔值歧义)
if result is not None: if result is not None:
@@ -378,7 +405,8 @@ class VectorStore:
return None return None
except Exception as e: except Exception as e:
logger.error(f"获取节点失败: {e}") # 节点不存在是正常情况,降级为 debug
logger.debug(f"获取节点失败(节点可能不存在): {e}")
return None return None
async def delete_node(self, node_id: str) -> None: async def delete_node(self, node_id: str) -> None:
@@ -392,7 +420,11 @@ class VectorStore:
raise RuntimeError("向量存储未初始化") raise RuntimeError("向量存储未初始化")
try: try:
self.collection.delete(ids=[node_id]) # ChromaDB delete() 是同步阻塞操作,必须在线程中执行
def _delete():
self.collection.delete(ids=[node_id])
await asyncio.to_thread(_delete)
logger.debug(f"删除节点: {node_id}") logger.debug(f"删除节点: {node_id}")
except Exception as e: except Exception as e:
@@ -411,7 +443,11 @@ class VectorStore:
raise RuntimeError("向量存储未初始化") raise RuntimeError("向量存储未初始化")
try: try:
self.collection.update(ids=[node_id], embeddings=[embedding.tolist()]) # ChromaDB update() 是同步阻塞操作,必须在线程中执行
def _update():
self.collection.update(ids=[node_id], embeddings=[embedding.tolist()])
await asyncio.to_thread(_update)
logger.debug(f"更新节点 embedding: {node_id}") logger.debug(f"更新节点 embedding: {node_id}")
except Exception as e: except Exception as e:
@@ -419,10 +455,16 @@ class VectorStore:
raise raise
def get_total_count(self) -> int: def get_total_count(self) -> int:
"""获取向量存储中的节点总数""" """获取向量存储中的节点总数(同步方法,谨慎在 async 上下文中使用)"""
if not self.collection: if not self.collection:
return 0 return 0
return self.collection.count() return self.collection.count()
async def get_total_count_async(self) -> int:
"""异步获取向量存储中的节点总数"""
if not self.collection:
return 0
return await asyncio.to_thread(self.collection.count)
async def clear(self) -> None: async def clear(self) -> None:
"""清空向量存储(危险操作,仅用于测试)""" """清空向量存储(危险操作,仅用于测试)"""
@@ -430,12 +472,15 @@ class VectorStore:
return return
try: try:
# 删除并重新创建集合 # ChromaDB delete_collection 和 get_or_create_collection 都是同步阻塞操作
self.client.delete_collection(self.collection_name) def _clear():
self.collection = self.client.get_or_create_collection( self.client.delete_collection(self.collection_name)
name=self.collection_name, return self.client.get_or_create_collection(
metadata={"description": "Memory graph node embeddings"}, name=self.collection_name,
) metadata={"description": "Memory graph node embeddings"},
)
self.collection = await asyncio.to_thread(_clear)
logger.warning(f"向量存储已清空: {self.collection_name}") logger.warning(f"向量存储已清空: {self.collection_name}")
except Exception as e: except Exception as e:
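A short usage sketch (not from this PR) of the new count pair: inside a coroutine, the async wrapper keeps the blocking ChromaDB call off the event loop, while the original synchronous method remains available for non-async callers. `store` is assumed to be an initialized VectorStore, and the logger name is illustrative.

from src.common.logger import get_logger

logger = get_logger("memory_vector_store_demo")  # illustrative logger name


async def log_node_count(store) -> None:
    # asyncio.to_thread 把同步的 collection.count() 放到工作线程执行,不阻塞事件循环
    count = await store.get_total_count_async()
    # 对比:store.get_total_count() 会直接在事件循环线程里调用 count()
    logger.info(f"向量存储节点总数: {count}")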

View File

@@ -559,6 +559,7 @@ class BaseAction(ABC):
action_require=getattr(cls, "action_require", []).copy(), action_require=getattr(cls, "action_require", []).copy(),
associated_types=getattr(cls, "associated_types", []).copy(), associated_types=getattr(cls, "associated_types", []).copy(),
chat_type_allow=getattr(cls, "chat_type_allow", ChatType.ALL), chat_type_allow=getattr(cls, "chat_type_allow", ChatType.ALL),
chatter_allow=getattr(cls, "chatter_allow", []).copy(),
# 二步Action相关属性 # 二步Action相关属性
is_two_step_action=getattr(cls, "is_two_step_action", False), is_two_step_action=getattr(cls, "is_two_step_action", False),
step_one_description=getattr(cls, "step_one_description", ""), step_one_description=getattr(cls, "step_one_description", ""),

View File

@@ -209,6 +209,7 @@ class ActionInfo(ComponentInfo):
mode_enable: ChatMode = ChatMode.ALL mode_enable: ChatMode = ChatMode.ALL
parallel_action: bool = False parallel_action: bool = False
chat_type_allow: ChatType = ChatType.ALL # 允许的聊天类型 chat_type_allow: ChatType = ChatType.ALL # 允许的聊天类型
chatter_allow: list[str] = field(default_factory=list) # 允许的 Chatter 列表,空则允许所有
# 二步Action相关属性 # 二步Action相关属性
is_two_step_action: bool = False # 是否为二步Action is_two_step_action: bool = False # 是否为二步Action
step_one_description: str = "" # 第一步的描述 step_one_description: str = "" # 第一步的描述
@@ -226,6 +227,8 @@ class ActionInfo(ComponentInfo):
self.associated_types = [] self.associated_types = []
if self.sub_actions is None: if self.sub_actions is None:
self.sub_actions = [] self.sub_actions = []
if self.chatter_allow is None:
self.chatter_allow = []
self.component_type = ComponentType.ACTION self.component_type = ComponentType.ACTION

View File

@@ -0,0 +1,7 @@
"""
AFC 专属动作模块
"""
from .reply import ReplyAction, RespondAction
__all__ = ["ReplyAction", "RespondAction"]

View File

@@ -0,0 +1,268 @@
"""
AFC 回复动作模块
定义了两种回复相关动作:
- reply: 针对单条消息的深度回复(使用 s4u 模板)
- respond: 对未读消息的统一回应(使用 normal 模板)
这些动作是 AffinityFlowChatter 的专属动作。
"""
import asyncio
from typing import ClassVar
from src.common.data_models.database_data_model import DatabaseMessages
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system import ActionActivationType, BaseAction, ChatMode
from src.plugin_system.apis import generator_api, send_api
logger = get_logger("afc_reply_actions")
class ReplyAction(BaseAction):
"""Reply动作 - 针对单条消息的深度回复
特点:
- 使用 s4u (Speak for You) 模板
- 专注于理解和回应单条消息的具体内容
- 适合 Focus 模式下的精准回复
- 仅限 AffinityFlowChatter 使用
"""
# 动作基本信息
action_name = "reply"
action_description = "针对特定消息进行精准回复。深度理解并回应单条消息的具体内容。需要指定目标消息ID。"
# 激活设置
activation_type = ActionActivationType.ALWAYS # 回复动作总是可用
mode_enable = ChatMode.ALL # 在所有模式下都可用
parallel_action = False # 回复动作不能与其他动作并行
# Chatter 限制:仅允许 AffinityFlowChatter 使用
chatter_allow: ClassVar[list[str]] = ["AffinityFlowChatter"]
# 动作参数定义
action_parameters: ClassVar = {
"target_message_id": "要回复的目标消息ID必需来自未读消息的 <m...> 标签)",
"content": "回复的具体内容可选由LLM生成",
"should_quote_reply": "是否引用原消息可选true/false默认false。群聊中回复较早消息或需要明确指向时使用true",
}
# 动作使用场景
action_require: ClassVar = [
"需要针对特定消息进行精准回复时使用",
"适合单条消息的深度理解和回应",
"必须提供准确的 target_message_id来自未读历史的 <m...> 标签)",
"私聊场景必须使用此动作(不支持 respond",
"群聊中需要明确回应某个特定用户或问题时使用",
"关注单条消息的具体内容和上下文细节",
]
# 关联类型
associated_types: ClassVar[list[str]] = ["text"]
async def execute(self) -> tuple[bool, str]:
"""执行reply动作 - 完整的回复流程"""
try:
# 确保 action_message 是 DatabaseMessages 类型,否则使用 None
reply_message = self.action_message if isinstance(self.action_message, DatabaseMessages) else None
# 检查目标消息是否为表情包
if reply_message and getattr(reply_message, "is_emoji", False):
if not getattr(global_config.chat, "allow_reply_to_emoji", True):
logger.info(f"{self.log_prefix} 目标消息为表情包且配置不允许回复,跳过")
return True, ""
# 准备 action_data
action_data = self.action_data.copy()
action_data["prompt_mode"] = "s4u"
# 生成回复
success, response_set, _ = await generator_api.generate_reply(
chat_stream=self.chat_stream,
reply_message=reply_message,
action_data=action_data,
available_actions={self.action_name: self.get_action_info()},
enable_tool=global_config.tool.enable_tool,
request_type="chat.replyer",
from_plugin=False,
)
if not success or not response_set:
logger.warning(f"{self.log_prefix} 回复生成失败")
return False, ""
# 发送回复
reply_text = await self._send_response(response_set)
logger.info(f"{self.log_prefix} reply 动作执行成功")
return True, reply_text
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 回复任务被取消")
return False, ""
except Exception as e:
logger.error(f"{self.log_prefix} reply 动作执行失败: {e}")
import traceback
traceback.print_exc()
return False, ""
async def _send_response(self, response_set) -> str:
"""发送回复内容"""
reply_text = ""
should_quote = self.action_data.get("should_quote_reply", False)
first_sent = False
# 确保 action_message 是 DatabaseMessages 类型
reply_message = self.action_message if isinstance(self.action_message, DatabaseMessages) else None
for reply_seg in response_set:
# 处理元组格式
if isinstance(reply_seg, tuple) and len(reply_seg) >= 2:
_, data = reply_seg
else:
data = str(reply_seg)
if isinstance(data, list):
data = "".join(map(str, data))
reply_text += data
# 发送消息
if not first_sent:
await send_api.text_to_stream(
text=data,
stream_id=self.chat_stream.stream_id,
reply_to_message=reply_message,
set_reply=should_quote and bool(reply_message),
typing=False,
)
first_sent = True
else:
await send_api.text_to_stream(
text=data,
stream_id=self.chat_stream.stream_id,
reply_to_message=None,
set_reply=False,
typing=True,
)
return reply_text
class RespondAction(BaseAction):
"""Respond动作 - 对未读消息的统一回应
特点:
- 关注整体对话动态和未读消息的统一回应
- 适合对于群聊消息下的宏观回应
- 避免与单一用户深度对话而忽略其他用户的消息
- 仅限 AffinityFlowChatter 使用
"""
# 动作基本信息
action_name = "respond"
action_description = "统一回应所有未读消息。理解整体对话动态和话题走向,生成连贯的回复。无需指定目标消息。"
# 激活设置
activation_type = ActionActivationType.ALWAYS # 回应动作总是可用
mode_enable = ChatMode.ALL # 在所有模式下都可用
parallel_action = False # 回应动作不能与其他动作并行
# Chatter 限制:仅允许 AffinityFlowChatter 使用
chatter_allow: ClassVar[list[str]] = ["AffinityFlowChatter"]
# 动作参数定义
action_parameters: ClassVar = {
"content": "回复的具体内容可选由LLM生成",
}
# 动作使用场景
action_require: ClassVar = [
"需要统一回应多条未读消息时使用Normal 模式专用)",
"适合理解整体对话动态而非单条消息",
"不需要指定 target_message_id会自动处理所有未读消息",
"关注对话流程、话题走向和整体氛围",
"适合群聊中的自然对话流,无需精确指向特定消息",
"可以同时回应多个话题或参与者",
]
# 关联类型
associated_types: ClassVar[list[str]] = ["text"]
async def execute(self) -> tuple[bool, str]:
"""执行respond动作 - 完整的回复流程"""
try:
# 准备 action_data
action_data = self.action_data.copy()
action_data["prompt_mode"] = "normal"
# 确保 action_message 是 DatabaseMessages 类型,否则使用 None
reply_message = self.action_message if isinstance(self.action_message, DatabaseMessages) else None
# 生成回复
success, response_set, _ = await generator_api.generate_reply(
chat_stream=self.chat_stream,
reply_message=reply_message,
action_data=action_data,
available_actions={self.action_name: self.get_action_info()},
enable_tool=global_config.tool.enable_tool,
request_type="chat.replyer",
from_plugin=False,
)
if not success or not response_set:
logger.warning(f"{self.log_prefix} 回复生成失败")
return False, ""
# 发送回复respond 默认不引用)
reply_text = await self._send_response(response_set)
logger.info(f"{self.log_prefix} respond 动作执行成功")
return True, reply_text
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 回复任务被取消")
return False, ""
except Exception as e:
logger.error(f"{self.log_prefix} respond 动作执行失败: {e}")
import traceback
traceback.print_exc()
return False, ""
async def _send_response(self, response_set) -> str:
"""发送回复内容(不引用原消息)"""
reply_text = ""
first_sent = False
for reply_seg in response_set:
if isinstance(reply_seg, tuple) and len(reply_seg) >= 2:
_, data = reply_seg
else:
data = str(reply_seg)
if isinstance(data, list):
data = "".join(map(str, data))
reply_text += data
if not first_sent:
await send_api.text_to_stream(
text=data,
stream_id=self.chat_stream.stream_id,
reply_to_message=None,
set_reply=False,
typing=False,
)
first_sent = True
else:
await send_api.text_to_stream(
text=data,
stream_id=self.chat_stream.stream_id,
reply_to_message=None,
set_reply=False,
typing=True,
)
return reply_text

View File

@@ -66,13 +66,6 @@ class ChatterPlanExecutor:
action_types = [action.action_type for action in plan.decided_actions] action_types = [action.action_type for action in plan.decided_actions]
logger.info(f"选择动作: {', '.join(action_types) if action_types else ''}") logger.info(f"选择动作: {', '.join(action_types) if action_types else ''}")
# 根据配置决定是否启用批量存储模式
if global_config.database.batch_action_storage_enabled:
self.action_manager.enable_batch_storage(plan.chat_id)
logger.debug("已启用批量存储模式")
else:
logger.debug("批量存储功能已禁用,使用立即存储模式")
execution_results = [] execution_results = []
reply_actions = [] reply_actions = []
other_actions = [] other_actions = []
@@ -109,9 +102,6 @@ class ChatterPlanExecutor:
f"规划执行完成: 总数={len(plan.decided_actions)}, 成功={successful_count}, 失败={len(execution_results) - successful_count}" f"规划执行完成: 总数={len(plan.decided_actions)}, 成功={successful_count}, 失败={len(execution_results) - successful_count}"
) )
# 批量存储所有待处理的动作
await self._flush_action_manager_batch_storage(plan)
return { return {
"executed_count": len(plan.decided_actions), "executed_count": len(plan.decided_actions),
"successful_count": successful_count, "successful_count": successful_count,
@@ -530,25 +520,3 @@ class ChatterPlanExecutor:
} }
for i, time_val in enumerate(recent_times) for i, time_val in enumerate(recent_times)
] ]
async def _flush_action_manager_batch_storage(self, plan: Plan):
"""使用 action_manager 的批量存储功能存储所有待处理的动作"""
try:
# 通过 chat_id 获取真实的 chat_stream 对象
from src.plugin_system.apis.chat_api import get_chat_manager
chat_manager = get_chat_manager()
chat_stream = await chat_manager.get_stream(plan.chat_id)
if chat_stream:
# 调用 action_manager 的批量存储
await self.action_manager.flush_batch_storage(chat_stream)
logger.info("批量存储完成:通过 action_manager 存储所有动作记录")
# 禁用批量存储模式
self.action_manager.disable_batch_storage()
except Exception as e:
logger.error(f"批量存储动作记录时发生错误: {e}")
# 确保在出错时也禁用批量存储模式
self.action_manager.disable_batch_storage()

View File

@@ -663,6 +663,18 @@ class ChatterPlanFilter:
f"[{action}] 找不到目标消息target_message_id: {action_data.get('target_message_id')}" f"[{action}] 找不到目标消息target_message_id: {action_data.get('target_message_id')}"
) )
# reply 动作必须有目标消息,如果仍然为 None则使用最新消息
if action in ["reply", "proactive_reply"] and action_message_obj is None:
logger.warning(f"[{action}] 目标消息为空,强制使用最新消息作为兜底")
latest_message_dict = self._get_latest_message(message_id_list)
if latest_message_dict:
from src.common.data_models.database_data_model import DatabaseMessages
try:
action_message_obj = DatabaseMessages(**latest_message_dict)
logger.info(f"[{action}] 成功使用最新消息: {action_message_obj.message_id}")
except Exception as e:
logger.error(f"[{action}] 无法转换最新消息: {e}")
return ActionPlannerInfo( return ActionPlannerInfo(
action_type=action, action_type=action,
reasoning=reasoning, reasoning=reasoning,

View File

@@ -264,7 +264,7 @@ class ChatterActionPlanner:
# 3. 在规划前,先进行动作修改 # 3. 在规划前,先进行动作修改
from src.chat.planner_actions.action_modifier import ActionModifier from src.chat.planner_actions.action_modifier import ActionModifier
action_modifier = ActionModifier(self.action_manager, self.chat_id) action_modifier = ActionModifier(self.action_manager, self.chat_id)
await action_modifier.modify_actions() await action_modifier.modify_actions(chatter_name="AffinityFlowChatter")
# 4. 生成初始计划 # 4. 生成初始计划
initial_plan = await self.generator.generate(ChatMode.FOCUS) initial_plan = await self.generator.generate(ChatMode.FOCUS)

View File

@@ -86,4 +86,20 @@ class AffinityChatterPlugin(BasePlugin):
except Exception as e: except Exception as e:
logger.error(f"加载 ProactiveThinkingMessageHandler 时出错: {e}") logger.error(f"加载 ProactiveThinkingMessageHandler 时出错: {e}")
try:
# 延迟导入 ReplyActionAFC 专属动作)
from .actions.reply import ReplyAction
components.append((ReplyAction.get_action_info(), ReplyAction))
except Exception as e:
logger.error(f"加载 ReplyAction 时出错: {e}")
try:
# 延迟导入 RespondActionAFC 专属动作)
from .actions.reply import RespondAction
components.append((RespondAction.get_action_info(), RespondAction))
except Exception as e:
logger.error(f"加载 RespondAction 时出错: {e}")
return components return components

View File

@@ -699,6 +699,42 @@ async def execute_proactive_thinking(stream_id: str):
try: try:
# 0. 前置检查 # 0. 前置检查
# 0.-1 检查是否是私聊且 KFC 主动思考已启用(让 KFC 接管私聊主动思考)
try:
from src.chat.message_receive.chat_stream import get_chat_manager
chat_manager = get_chat_manager()
chat_stream = await chat_manager.get_stream(stream_id)
# 判断是否是私聊(使用 chat_type 枚举或从 stream_id 判断)
is_private = False
if chat_stream:
try:
is_private = chat_stream.chat_type.name == "private"
except Exception:
# 回退:从 stream_id 判断(私聊通常不包含 "group"
is_private = "group" not in stream_id.lower()
if is_private:
# 这是一个私聊,检查 KFC 是否启用且其主动思考是否启用
try:
from src.config.config import global_config
kfc_config = getattr(global_config, 'kokoro_flow_chatter', None)
if kfc_config:
kfc_enabled = getattr(kfc_config, 'enable', False)
proactive_config = getattr(kfc_config, 'proactive_thinking', None)
proactive_enabled = getattr(proactive_config, 'enabled', False) if proactive_config else False
if kfc_enabled and proactive_enabled:
logger.debug(
f"[主动思考] 私聊 {stream_id} 由 KFC 主动思考接管,跳过通用主动思考"
)
return
except Exception as e:
logger.debug(f"检查 KFC 配置时出错,继续执行通用主动思考: {e}")
except Exception as e:
logger.warning(f"检查私聊/KFC 状态时出错: {e},继续执行")
# 0.0 检查聊天流是否正在处理消息(双重保护) # 0.0 检查聊天流是否正在处理消息(双重保护)
try: try:
from src.chat.message_receive.chat_stream import get_chat_manager from src.chat.message_receive.chat_stream import get_chat_manager

View File

@@ -219,8 +219,7 @@ class EmojiAction(BaseAction):
) )
emoji_base64, emoji_description = random.choice(all_emojis_data) emoji_base64, emoji_description = random.choice(all_emojis_data)
assert global_config is not None elif global_config.emoji.emoji_selection_mode == "description":
if global_config.emoji.emoji_selection_mode == "description":
# --- 详细描述选择模式 --- # --- 详细描述选择模式 ---
# 获取最近的5条消息内容用于判断 # 获取最近的5条消息内容用于判断
recent_messages = await message_api.get_recent_messages(chat_id=self.chat_id, limit=20) recent_messages = await message_api.get_recent_messages(chat_id=self.chat_id, limit=20)

View File

@@ -1,8 +1,10 @@
""" """
核心动作插件 核心动作插件
将系统核心动作(reply、no_reply、emoji转换为新插件系统格式 将系统核心动作emoji转换为新插件系统格式
这是系统的内置插件,提供基础的聊天交互功能 这是系统的内置插件,提供基础的聊天交互功能
注意reply 和 respond 动作已移至 AffinityFlowChatter 插件
""" """
# 导入依赖的系统组件 # 导入依赖的系统组件
@@ -16,7 +18,6 @@ from src.plugin_system.base.config_types import ConfigField
# 导入API模块 - 标准Python包方式 # 导入API模块 - 标准Python包方式
from src.plugins.built_in.core_actions.emoji import EmojiAction from src.plugins.built_in.core_actions.emoji import EmojiAction
from src.plugins.built_in.core_actions.reply import ReplyAction, RespondAction
logger = get_logger("core_actions") logger = get_logger("core_actions")
@@ -26,11 +27,11 @@ class CoreActionsPlugin(BasePlugin):
"""核心动作插件 """核心动作插件
系统内置插件,提供基础的聊天交互功能: 系统内置插件,提供基础的聊天交互功能:
- Reply: 回复动作
- NoReply: 不回复动作
- Emoji: 表情动作 - Emoji: 表情动作
注意:插件基本信息优先从_manifest.json文件中读取 注意:
- reply 和 respond 动作已移至 AffinityFlowChatter 插件
- 插件基本信息优先从_manifest.json文件中读取
""" """
# 插件基本信息 # 插件基本信息
@@ -53,8 +54,6 @@ class CoreActionsPlugin(BasePlugin):
"config_version": ConfigField(type=str, default="0.6.0", description="配置文件版本"), "config_version": ConfigField(type=str, default="0.6.0", description="配置文件版本"),
}, },
"components": { "components": {
"enable_reply": ConfigField(type=bool, default=True, description="是否启用 reply 动作s4u模板"),
"enable_respond": ConfigField(type=bool, default=True, description="是否启用 respond 动作normal模板"),
"enable_emoji": ConfigField(type=bool, default=True, description="是否启用发送表情/图片动作"), "enable_emoji": ConfigField(type=bool, default=True, description="是否启用发送表情/图片动作"),
}, },
} }
@@ -65,14 +64,6 @@ class CoreActionsPlugin(BasePlugin):
# --- 根据配置注册组件 --- # --- 根据配置注册组件 ---
components: ClassVar = [] components: ClassVar = []
# 注册 reply 动作
if self.get_config("components.enable_reply", True):
components.append((ReplyAction.get_action_info(), ReplyAction))
# 注册 respond 动作
if self.get_config("components.enable_respond", True):
components.append((RespondAction.get_action_info(), RespondAction))
# 注册 emoji 动作 # 注册 emoji 动作
if self.get_config("components.enable_emoji", True): if self.get_config("components.enable_emoji", True):
components.append((EmojiAction.get_action_info(), EmojiAction)) components.append((EmojiAction.get_action_info(), EmojiAction))

View File

@@ -1,108 +0,0 @@
"""
回复动作模块
定义了两种回复动作:
- reply: 针对单条消息的深度回复(使用 s4u 模板)
- respond: 对未读消息的统一回应(使用 normal 模板)
"""
from typing import ClassVar
from src.common.logger import get_logger
from src.plugin_system import ActionActivationType, BaseAction, ChatMode
logger = get_logger("reply_actions")
class ReplyAction(BaseAction):
"""Reply动作 - 针对单条消息的深度回复
特点:
- 使用 s4u (Speak for You) 模板
- 专注于理解和回应单条消息的具体内容
- 适合 Focus 模式下的精准回复
"""
# 动作基本信息
action_name = "reply"
action_description = "针对特定消息进行精准回复。深度理解并回应单条消息的具体内容。需要指定目标消息ID。"
# 激活设置
activation_type = ActionActivationType.ALWAYS # 回复动作总是可用
mode_enable = ChatMode.ALL # 在所有模式下都可用
parallel_action = False # 回复动作不能与其他动作并行
# 动作参数定义
action_parameters: ClassVar = {
"target_message_id": "要回复的目标消息ID必需来自未读消息的 <m...> 标签)",
"content": "回复的具体内容可选由LLM生成",
"should_quote_reply": "是否引用原消息可选true/false默认false。群聊中回复较早消息或需要明确指向时使用true",
}
# 动作使用场景
action_require: ClassVar = [
"需要针对特定消息进行精准回复时使用",
"适合单条消息的深度理解和回应",
"必须提供准确的 target_message_id来自未读历史的 <m...> 标签)",
"私聊场景必须使用此动作(不支持 respond",
"群聊中需要明确回应某个特定用户或问题时使用",
"关注单条消息的具体内容和上下文细节",
]
# 关联类型
associated_types: ClassVar[list[str]] = ["text"]
async def execute(self) -> tuple[bool, str]:
"""执行reply动作
注意:实际的回复生成由 action_manager 统一处理
这里只是标记使用 reply 动作s4u 模板)
"""
logger.info(f"{self.log_prefix} 使用 reply 动作s4u 模板)")
return True, ""
class RespondAction(BaseAction):
"""Respond动作 - 对未读消息的统一回应
特点:
- 关注整体对话动态和未读消息的统一回应
- 适合对于群聊消息下的宏观回应
- 避免与单一用户深度对话而忽略其他用户的消息
"""
# 动作基本信息
action_name = "respond"
action_description = "统一回应所有未读消息。理解整体对话动态和话题走向,生成连贯的回复。无需指定目标消息。"
# 激活设置
activation_type = ActionActivationType.ALWAYS # 回应动作总是可用
mode_enable = ChatMode.ALL # 在所有模式下都可用
parallel_action = False # 回应动作不能与其他动作并行
# 动作参数定义
action_parameters: ClassVar = {
"content": "回复的具体内容可选由LLM生成",
}
# 动作使用场景
action_require: ClassVar = [
"需要统一回应多条未读消息时使用Normal 模式专用)",
"适合理解整体对话动态而非单条消息",
"不需要指定 target_message_id会自动处理所有未读消息",
"关注对话流程、话题走向和整体氛围",
"适合群聊中的自然对话流,无需精确指向特定消息",
"可以同时回应多个话题或参与者",
]
# 关联类型
associated_types: ClassVar[list[str]] = ["text"]
async def execute(self) -> tuple[bool, str]:
"""执行respond动作
注意:实际的回复生成由 action_manager 统一处理
这里只是标记使用 respond 动作normal 模板)
"""
logger.info(f"{self.log_prefix} 使用 respond 动作normal 模板)")
return True, ""

View File

@@ -0,0 +1,89 @@
"""
Kokoro Flow Chatter (KFC) - 私聊特化的心流聊天器
重构版本,支持双模式架构:
工作模式:
- unified统一模式: 单次 LLM 调用完成思考和回复生成(默认)
- split分离模式: Planner + Replyer 两次 LLM 调用,更精细的控制
核心设计理念:
1. Chatter 职责极简化:只负责"收到消息 → 规划执行"
2. Session 状态简化:只有 IDLE 和 WAITING 两种状态
3. 独立的 Replyer专属的提示词构建和 LLM 交互
4. 独立的主动思考器:负责等待管理和主动发起
5. 大模板 + 小模板:线性叙事风格的提示词架构
"""
from .models import (
EventType,
SessionStatus,
MentalLogEntry,
WaitingConfig,
ActionModel,
LLMResponse,
)
from .session import KokoroSession, SessionManager, get_session_manager
from .chatter import KokoroFlowChatter
from .planner import generate_plan
from .replyer import generate_reply_text
from .unified import generate_unified_response
from .proactive_thinker import (
ProactiveThinker,
get_proactive_thinker,
start_proactive_thinker,
stop_proactive_thinker,
)
from .config import (
KFCMode,
KokoroFlowChatterConfig,
get_config,
load_config,
reload_config,
)
from .plugin import KokoroFlowChatterPlugin
from src.plugin_system.base.plugin_metadata import PluginMetadata
__plugin_meta__ = PluginMetadata(
name="Kokoro Flow Chatter",
description="专为私聊设计的深度情感交互处理器,支持统一/分离双模式",
usage="在私聊场景中自动启用,可通过 [kokoro_flow_chatter].enable 和 .mode 配置",
version="3.1.0",
author="MoFox",
keywords=["chatter", "kokoro", "private", "emotional", "narrative", "dual-mode"],
categories=["Chat", "AI", "Emotional"],
extra={"is_built_in": True, "chat_type": "private"},
)
__all__ = [
# Models
"EventType",
"SessionStatus",
"MentalLogEntry",
"WaitingConfig",
"ActionModel",
"LLMResponse",
# Session
"KokoroSession",
"SessionManager",
"get_session_manager",
# Core Components
"KokoroFlowChatter",
"generate_plan",
"generate_reply_text",
"generate_unified_response",
# Proactive Thinker
"ProactiveThinker",
"get_proactive_thinker",
"start_proactive_thinker",
"stop_proactive_thinker",
# Config
"KFCMode",
"KokoroFlowChatterConfig",
"get_config",
"load_config",
"reload_config",
# Plugin
"KokoroFlowChatterPlugin",
"__plugin_meta__",
]

View File

@@ -0,0 +1,7 @@
"""
KFC V2 专属动作模块
"""
from .reply import KFCReplyAction
__all__ = ["KFCReplyAction"]

View File

@@ -0,0 +1,288 @@
"""
KFC 回复动作模块
KFC 的 reply 动作:
- 完整的回复流程在 execute() 中实现
- 调用 Replyer 生成回复文本
- 回复后处理(系统格式词过滤、分段发送、错字生成等)
- 发送回复消息
与 AFC 类似,但使用 KFC 专属的 Replyer 和 Session 系统。
"""
import asyncio
from typing import TYPE_CHECKING, ClassVar, Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system import ActionActivationType, BaseAction, ChatMode
from src.plugin_system.apis import send_api
if TYPE_CHECKING:
from ..session import KokoroSession
logger = get_logger("kfc_reply_action")
class KFCReplyAction(BaseAction):
"""KFC Reply 动作 - 完整的私聊回复流程
特点:
- 完整的回复流程:生成回复 → 后处理 → 分段发送
- 使用 KFC 专属的 Replyer 生成回复
- 支持系统格式词过滤、分段发送、错字生成等后处理
- 仅限 KokoroFlowChatter 使用
action_data 参数:
- user_id: 用户ID必需用于获取 Session
- user_name: 用户名称(必需)
- thought: Planner 生成的想法/内心独白(必需)
- situation_type: 情况类型(可选,默认 "new_message"
- extra_context: 额外上下文(可选)
- content: 预生成的回复内容(可选,如果提供则直接发送)
- should_quote_reply: 是否引用原消息(可选,默认 false
- enable_splitter: 是否启用分段发送(可选,默认 true
- enable_chinese_typo: 是否启用错字生成(可选,默认 true
"""
# 动作基本信息
action_name = "kfc_reply"
action_description = "发送回复消息。会根据当前对话情境生成并发送回复。"
# 激活设置
activation_type = ActionActivationType.ALWAYS
mode_enable = ChatMode.ALL
parallel_action = False
# Chatter 限制:仅允许 KokoroFlowChatter 使用
chatter_allow: ClassVar[list[str]] = ["KokoroFlowChatter"]
# 动作参数定义
action_parameters: ClassVar = {
"content": "要发送的回复内容(可选,如果不提供则自动生成)",
"should_quote_reply": "是否引用原消息可选true/false默认 false",
}
# 动作使用场景
action_require: ClassVar = [
"需要发送回复消息时使用",
"私聊场景的标准回复动作",
]
# 关联类型
associated_types: ClassVar[list[str]] = ["text"]
async def execute(self) -> tuple[bool, str]:
"""执行 reply 动作 - 完整的回复流程"""
try:
# 1. 检查是否有预生成的内容
content = self.action_data.get("content", "")
if not content:
# 2. 需要生成回复,获取必要信息
user_id = self.action_data.get("user_id")
user_name = self.action_data.get("user_name", "用户")
thought = self.action_data.get("thought", "")
situation_type = self.action_data.get("situation_type", "new_message")
extra_context = self.action_data.get("extra_context")
if not user_id:
logger.warning(f"{self.log_prefix} 缺少 user_id无法生成回复")
return False, ""
# 3. 获取 Session
session = await self._get_session(user_id)
if not session:
logger.warning(f"{self.log_prefix} 无法获取 Session: {user_id}")
return False, ""
# 4. 调用 Replyer 生成回复
success, content = await self._generate_reply(
session=session,
user_name=user_name,
thought=thought,
situation_type=situation_type,
extra_context=extra_context,
)
if not success or not content:
logger.warning(f"{self.log_prefix} 回复生成失败")
return False, ""
# 5. 回复后处理(系统格式词过滤 + 分段处理)
enable_splitter = self.action_data.get("enable_splitter", True)
enable_chinese_typo = self.action_data.get("enable_chinese_typo", True)
processed_segments = self._post_process_reply(
content=content,
enable_splitter=enable_splitter,
enable_chinese_typo=enable_chinese_typo,
)
if not processed_segments:
logger.warning(f"{self.log_prefix} 回复后处理后内容为空")
return False, ""
# 6. 分段发送回复
should_quote = self.action_data.get("should_quote_reply", False)
reply_text = await self._send_segments(
segments=processed_segments,
should_quote=should_quote,
)
logger.info(f"{self.log_prefix} KFC reply 动作执行成功: {reply_text[:50]}...")
return True, reply_text
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 回复任务被取消")
return False, ""
except Exception as e:
logger.error(f"{self.log_prefix} KFC reply 动作执行失败: {e}")
import traceback
traceback.print_exc()
return False, ""
def _post_process_reply(
self,
content: str,
enable_splitter: bool = True,
enable_chinese_typo: bool = True,
) -> list[str]:
"""
回复后处理
包括:
1. 系统格式词过滤(移除 [回复...]、[表情包:...]、@<...> 等)
2. 分段处理(根据标点分句、智能合并)
3. 错字生成(拟人化)
Args:
content: 原始回复内容
enable_splitter: 是否启用分段
enable_chinese_typo: 是否启用错字生成
Returns:
处理后的文本段落列表
"""
try:
from src.chat.utils.utils import filter_system_format_content, process_llm_response
# 1. 过滤系统格式词
filtered_content = filter_system_format_content(content)
if not filtered_content or not filtered_content.strip():
logger.warning(f"{self.log_prefix} 过滤系统格式词后内容为空")
return []
# 2. 分段处理 + 错字生成
processed_segments = process_llm_response(
filtered_content,
enable_splitter=enable_splitter,
enable_chinese_typo=enable_chinese_typo,
)
# 过滤空段落
processed_segments = [seg for seg in processed_segments if seg and seg.strip()]
logger.debug(
f"{self.log_prefix} 回复后处理完成: "
f"原始长度={len(content)}, 过滤后长度={len(filtered_content)}, "
f"分段数={len(processed_segments)}"
)
return processed_segments
except Exception as e:
logger.error(f"{self.log_prefix} 回复后处理失败: {e}")
# 失败时返回原始内容
return [content] if content else []
async def _send_segments(
self,
segments: list[str],
should_quote: bool = False,
) -> str:
"""
分段发送回复
Args:
segments: 要发送的文本段落列表
should_quote: 是否引用原消息(仅第一条消息引用)
Returns:
完整的回复文本(所有段落拼接)
"""
reply_text = ""
first_sent = False
# 获取分段发送的间隔时间
typing_delay = 0.5
if global_config and hasattr(global_config, 'response_splitter'):
typing_delay = getattr(global_config.response_splitter, "typing_delay", 0.5)
for segment in segments:
if not segment or not segment.strip():
continue
reply_text += segment
# 发送消息
if not first_sent:
# 第一条消息:可能需要引用
await send_api.text_to_stream(
text=segment,
stream_id=self.chat_stream.stream_id,
reply_to_message=self.action_message,
set_reply=should_quote and bool(self.action_message),
typing=False,
)
first_sent = True
else:
# 后续消息:模拟打字延迟
if typing_delay > 0:
await asyncio.sleep(typing_delay)
await send_api.text_to_stream(
text=segment,
stream_id=self.chat_stream.stream_id,
reply_to_message=None,
set_reply=False,
typing=True,
)
return reply_text
async def _get_session(self, user_id: str) -> Optional["KokoroSession"]:
"""获取用户 Session"""
try:
from ..session import get_session_manager
session_manager = get_session_manager()
return await session_manager.get_session(user_id, self.chat_stream.stream_id)
except Exception as e:
logger.error(f"{self.log_prefix} 获取 Session 失败: {e}")
return None
async def _generate_reply(
self,
session: "KokoroSession",
user_name: str,
thought: str,
situation_type: str,
extra_context: Optional[dict] = None,
) -> tuple[bool, str]:
"""调用 Replyer 生成回复"""
try:
from ..replyer import generate_reply_text
return await generate_reply_text(
session=session,
user_name=user_name,
thought=thought,
situation_type=situation_type,
chat_stream=self.chat_stream,
extra_context=extra_context,
)
except Exception as e:
logger.error(f"{self.log_prefix} 生成回复失败: {e}")
return False, ""

View File

@@ -0,0 +1,376 @@
"""
Kokoro Flow Chatter - Chatter 主类
支持两种工作模式:
1. unified统一模式: 单次 LLM 调用完成思考 + 回复生成
2. split分离模式: Planner + Replyer 两次 LLM 调用
核心设计:
- Chatter 只负责 "收到消息 → 规划执行" 的流程
- 无论 Session 之前是什么状态,流程都一样
- 区别只体现在提示词中
不负责:
- 等待超时处理(由 ProactiveThinker 负责)
- 连续思考(由 ProactiveThinker 负责)
- 主动发起对话(由 ProactiveThinker 负责)
"""
import asyncio
import time
from typing import TYPE_CHECKING, Any, ClassVar
from src.chat.planner_actions.action_manager import ChatterActionManager
from src.common.data_models.message_manager_data_model import StreamContext
from src.common.logger import get_logger
from src.plugin_system.base.base_chatter import BaseChatter
from src.plugin_system.base.component_types import ChatType
from .config import KFCMode, get_config
from .models import SessionStatus
from .session import get_session_manager
if TYPE_CHECKING:
pass
logger = get_logger("kfc_chatter")
class KokoroFlowChatter(BaseChatter):
"""
Kokoro Flow Chatter - 私聊特化的心流聊天器
支持两种工作模式(通过配置切换):
- unified: 单次 LLM 调用完成思考和回复
- split: Planner + Replyer 两次 LLM 调用
核心设计:
- Chatter 只负责 "收到消息 → 规划执行" 的流程
- 无论 Session 之前是什么状态,流程都一样
- 区别只体现在提示词中
不负责:
- 等待超时处理(由 ProactiveThinker 负责)
- 连续思考(由 ProactiveThinker 负责)
- 主动发起对话(由 ProactiveThinker 负责)
"""
chatter_name: str = "KokoroFlowChatter"
chatter_description: str = "心流聊天器 - 私聊特化的深度情感交互处理器"
chat_types: ClassVar[list[ChatType]] = [ChatType.PRIVATE]
def __init__(
self,
stream_id: str,
action_manager: "ChatterActionManager",
plugin_config: dict | None = None,
):
super().__init__(stream_id, action_manager, plugin_config)
# 核心组件
self.session_manager = get_session_manager()
# 加载配置
self._config = get_config()
self._mode = self._config.mode
# 并发控制
self._lock = asyncio.Lock()
self._processing = False
# 统计
self._stats: dict[str, Any] = {
"messages_processed": 0,
"successful_responses": 0,
"failed_responses": 0,
}
# 输出初始化信息
mode_str = "统一模式" if self._mode == KFCMode.UNIFIED else "分离模式"
logger.info(f"初始化完成 (模式: {mode_str}): stream_id={stream_id}")
async def execute(self, context: StreamContext) -> dict:
"""
执行聊天处理
流程:
1. 获取 Session
2. 获取未读消息
3. 记录用户消息到 mental_log
4. 确定 situation_type根据之前的等待状态
5. 根据模式调用对应的生成器
6. 执行动作
7. 更新 Session记录 Bot 规划,设置等待状态)
8. 保存 Session
"""
async with self._lock:
self._processing = True
try:
# 1. 获取未读消息
unread_messages = context.get_unread_messages()
if not unread_messages:
return self._build_result(success=True, message="no_unread_messages")
# 2. 取最后一条消息作为主消息
target_message = unread_messages[-1]
user_info = target_message.user_info
if not user_info:
return self._build_result(success=False, message="no_user_info")
user_id = str(user_info.user_id)
user_name = user_info.user_nickname or user_id
# 3. 获取或创建 Session
session = await self.session_manager.get_session(user_id, self.stream_id)
# 4. 确定 situation_type根据之前的等待状态
situation_type = self._determine_situation_type(session)
# 5. **立即**结束等待状态,防止 ProactiveThinker 并发处理
if session.status == SessionStatus.WAITING:
session.end_waiting()
await self.session_manager.save_session(user_id)
# 6. 记录用户消息到 mental_log
for msg in unread_messages:
msg_content = msg.processed_plain_text or msg.display_message or ""
msg_user_name = msg.user_info.user_nickname if msg.user_info else user_name
msg_user_id = str(msg.user_info.user_id) if msg.user_info else user_id
session.add_user_message(
content=msg_content,
user_name=msg_user_name,
user_id=msg_user_id,
timestamp=msg.time,
)
# 7. 加载可用动作(通过 ActionModifier 过滤)
from src.chat.planner_actions.action_modifier import ActionModifier
action_modifier = ActionModifier(self.action_manager, self.stream_id)
await action_modifier.modify_actions(chatter_name="KokoroFlowChatter")
available_actions = self.action_manager.get_using_actions()
# 8. 获取聊天流
chat_stream = await self._get_chat_stream()
# 9. 根据模式调用对应的生成器
if self._mode == KFCMode.UNIFIED:
plan_response = await self._execute_unified_mode(
session=session,
user_name=user_name,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
)
else:
plan_response = await self._execute_split_mode(
session=session,
user_name=user_name,
user_id=user_id,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
)
# 10. 执行动作
exec_results = []
has_reply = False
for action in plan_response.actions:
result = await self.action_manager.execute_action(
action_name=action.type,
chat_id=self.stream_id,
target_message=target_message,
reasoning=plan_response.thought,
action_data=action.params,
thinking_id=None,
log_prefix="[KFC]",
)
exec_results.append(result)
if result.get("success") and action.type in ("kfc_reply", "respond"):
has_reply = True
# 11. 记录 Bot 规划到 mental_log
session.add_bot_planning(
thought=plan_response.thought,
actions=[a.to_dict() for a in plan_response.actions],
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
# 12. 更新 Session 状态
if plan_response.max_wait_seconds > 0:
session.start_waiting(
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
else:
session.end_waiting()
# 13. 标记消息为已读
for msg in unread_messages:
context.mark_message_as_read(str(msg.message_id))
# 14. 保存 Session
await self.session_manager.save_session(user_id)
# 15. 更新统计
self._stats["messages_processed"] += len(unread_messages)
if has_reply:
self._stats["successful_responses"] += 1
# 输出完成信息
mode_str = "unified" if self._mode == KFCMode.UNIFIED else "split"
logger.info(
f"处理完成 ({mode_str}): "
f"user={user_name}, situation={situation_type}, "
f"actions={[a.type for a in plan_response.actions]}, "
f"wait={plan_response.max_wait_seconds}s"
)
return self._build_result(
success=True,
message="processed",
has_reply=has_reply,
thought=plan_response.thought,
situation_type=situation_type,
mode=mode_str,
)
except Exception as e:
self._stats["failed_responses"] += 1
logger.error(f"[KFC] 处理失败: {e}")
import traceback
traceback.print_exc()
return self._build_result(success=False, message=str(e), error=True)
finally:
self._processing = False
async def _execute_unified_mode(
self,
session,
user_name: str,
situation_type: str,
chat_stream,
available_actions,
):
"""
统一模式:单次 LLM 调用完成思考 + 回复生成
LLM 输出的 JSON 中 kfc_reply 动作已包含 content 字段,
无需再调用 Replyer 生成回复。
"""
from .unified import generate_unified_response
plan_response = await generate_unified_response(
session=session,
user_name=user_name,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
)
# 统一模式下 content 已经在 actions 中,无需注入
return plan_response
async def _execute_split_mode(
self,
session,
user_name: str,
user_id: str,
situation_type: str,
chat_stream,
available_actions,
):
"""
分离模式Planner + Replyer 两次 LLM 调用
1. Planner 生成行动计划JSONkfc_reply 不含 content
2. 为 kfc_reply 动作注入上下文,由 Action.execute() 调用 Replyer 生成回复
"""
from .planner import generate_plan
plan_response = await generate_plan(
session=session,
user_name=user_name,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
)
# 为 kfc_reply 动作注入回复生成所需的上下文
for action in plan_response.actions:
if action.type == "kfc_reply":
action.params["user_id"] = user_id
action.params["user_name"] = user_name
action.params["thought"] = plan_response.thought
action.params["situation_type"] = situation_type
return plan_response
def _determine_situation_type(self, session) -> str:
"""
确定当前情况类型
根据 Session 之前的状态决定提示词的 situation_type
"""
if session.status == SessionStatus.WAITING:
# 之前在等待
if session.waiting_config.is_timeout():
# 超时了才收到回复
return "reply_late"
else:
# 在预期内收到回复
return "reply_in_time"
else:
# 之前是 IDLE
return "new_message"
async def _get_chat_stream(self):
"""获取聊天流对象"""
try:
from src.chat.message_receive.chat_stream import get_chat_manager
chat_manager = get_chat_manager()
if chat_manager:
return await chat_manager.get_stream(self.stream_id)
except Exception as e:
logger.warning(f"[KFC] 获取 chat_stream 失败: {e}")
return None
def _build_result(
self,
success: bool,
message: str = "",
error: bool = False,
**kwargs,
) -> dict:
"""构建返回结果"""
result = {
"success": success,
"stream_id": self.stream_id,
"message": message,
"error": error,
"timestamp": time.time(),
}
result.update(kwargs)
return result
def get_stats(self) -> dict[str, Any]:
"""获取统计信息"""
stats = self._stats.copy()
stats["mode"] = self._mode.value
return stats
@property
def is_processing(self) -> bool:
"""是否正在处理"""
return self._processing
@property
def mode(self) -> KFCMode:
"""当前工作模式"""
return self._mode

View File

@@ -0,0 +1,264 @@
"""
Kokoro Flow Chatter - 配置
可以通过 TOML 配置文件覆盖默认值
支持两种工作模式:
- unified: 统一模式,单次 LLM 调用完成思考和回复生成(类似旧版架构)
- split: 分离模式Planner + Replyer 两次 LLM 调用(推荐,更精细的控制)
"""
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
class KFCMode(str, Enum):
"""KFC 工作模式"""
# 统一模式:单次 LLM 调用,生成思考 + 回复(类似旧版架构)
UNIFIED = "unified"
# 分离模式Planner 生成规划Replyer 生成回复(推荐)
SPLIT = "split"
@classmethod
def from_str(cls, value: str) -> "KFCMode":
"""从字符串创建模式"""
value = value.lower().strip()
if value == "unified":
return cls.UNIFIED
elif value == "split":
return cls.SPLIT
else:
# 默认使用统一模式
return cls.UNIFIED
@dataclass
class WaitingDefaults:
"""等待配置默认值"""
# 默认最大等待时间(秒)
default_max_wait_seconds: int = 300
# 最小等待时间
min_wait_seconds: int = 30
# 最大等待时间
max_wait_seconds: int = 1800
@dataclass
class ProactiveConfig:
"""主动思考配置"""
# 是否启用主动思考
enabled: bool = True
# 沉默阈值(秒),超过此时间考虑主动发起
silence_threshold_seconds: int = 7200
# 两次主动发起最小间隔(秒)
min_interval_between_proactive: int = 1800
# 勿扰时段开始HH:MM 格式)
quiet_hours_start: str = "23:00"
# 勿扰时段结束
quiet_hours_end: str = "07:00"
# 主动发起概率0.0 ~ 1.0
trigger_probability: float = 0.3
# 关系门槛:最低好感度,达到此值才会主动关心
min_affinity_for_proactive: float = 0.3
@dataclass
class PromptConfig:
"""提示词配置"""
# 活动记录保留条数
max_activity_entries: int = 30
# 每条记录最大字符数
max_entry_length: int = 500
# 是否包含人物关系信息
include_relation: bool = True
# 是否包含记忆信息
include_memory: bool = True
@dataclass
class SessionConfig:
"""会话配置"""
# Session 持久化目录(相对于 data/
session_dir: str = "kokoro_flow_chatter/sessions"
# Session 自动过期时间(秒),超过此时间未活动自动清理
session_expire_seconds: int = 86400 * 7 # 7 天
# 活动记录保留上限
max_mental_log_entries: int = 100
@dataclass
class LLMConfig:
"""LLM 配置"""
# 模型名称(空则使用默认)
model_name: str = ""
# Temperature
temperature: float = 0.8
# 最大 Token
max_tokens: int = 1024
# 请求超时(秒)
timeout: float = 60.0
@dataclass
class KokoroFlowChatterConfig:
"""Kokoro Flow Chatter 总配置"""
# 是否启用
enabled: bool = True
# 工作模式unified统一模式或 split分离模式
# - unified: 单次 LLM 调用完成思考和回复生成(类似旧版架构,更简洁)
# - split: Planner + Replyer 两次 LLM 调用(更精细的控制,推荐)
mode: KFCMode = KFCMode.UNIFIED
# 启用的消息源类型(空列表表示全部)
enabled_stream_types: List[str] = field(default_factory=lambda: ["private"])
# 等待配置
waiting: WaitingDefaults = field(default_factory=WaitingDefaults)
# 主动思考配置
proactive: ProactiveConfig = field(default_factory=ProactiveConfig)
# 提示词配置
prompt: PromptConfig = field(default_factory=PromptConfig)
# 会话配置
session: SessionConfig = field(default_factory=SessionConfig)
# LLM 配置
llm: LLMConfig = field(default_factory=LLMConfig)
# 调试模式
debug: bool = False
# 全局配置单例
_config: Optional[KokoroFlowChatterConfig] = None
def get_config() -> KokoroFlowChatterConfig:
"""获取全局配置"""
global _config
if _config is None:
_config = load_config()
return _config
def load_config() -> KokoroFlowChatterConfig:
"""从全局配置加载 KFC 配置"""
from src.config.config import global_config
config = KokoroFlowChatterConfig()
# 尝试从全局配置读取
if not global_config:
return config
try:
if hasattr(global_config, 'kokoro_flow_chatter'):
kfc_cfg = getattr(global_config, 'kokoro_flow_chatter')
# 基础配置 - 支持 enabled 和 enable 两种写法
if hasattr(kfc_cfg, 'enabled'):
config.enabled = kfc_cfg.enabled
elif hasattr(kfc_cfg, 'enable'):
config.enabled = kfc_cfg.enable
if hasattr(kfc_cfg, 'enabled_stream_types'):
config.enabled_stream_types = list(kfc_cfg.enabled_stream_types)
if hasattr(kfc_cfg, 'debug'):
config.debug = kfc_cfg.debug
# 工作模式配置
if hasattr(kfc_cfg, 'mode'):
config.mode = KFCMode.from_str(str(kfc_cfg.mode))
# 等待配置
if hasattr(kfc_cfg, 'waiting'):
wait_cfg = kfc_cfg.waiting
config.waiting = WaitingDefaults(
default_max_wait_seconds=getattr(wait_cfg, 'default_max_wait_seconds', 300),
min_wait_seconds=getattr(wait_cfg, 'min_wait_seconds', 30),
max_wait_seconds=getattr(wait_cfg, 'max_wait_seconds', 1800),
)
# 主动思考配置 - 支持 proactive 和 proactive_thinking 两种写法
pro_cfg = None
if hasattr(kfc_cfg, 'proactive'):
pro_cfg = kfc_cfg.proactive
elif hasattr(kfc_cfg, 'proactive_thinking'):
pro_cfg = kfc_cfg.proactive_thinking
if pro_cfg:
config.proactive = ProactiveConfig(
enabled=getattr(pro_cfg, 'enabled', True),
silence_threshold_seconds=getattr(pro_cfg, 'silence_threshold_seconds', 7200),
min_interval_between_proactive=getattr(pro_cfg, 'min_interval_between_proactive', 1800),
quiet_hours_start=getattr(pro_cfg, 'quiet_hours_start', "23:00"),
quiet_hours_end=getattr(pro_cfg, 'quiet_hours_end', "07:00"),
trigger_probability=getattr(pro_cfg, 'trigger_probability', 0.3),
min_affinity_for_proactive=getattr(pro_cfg, 'min_affinity_for_proactive', 0.3),
)
# 提示词配置
if hasattr(kfc_cfg, 'prompt'):
pmt_cfg = kfc_cfg.prompt
config.prompt = PromptConfig(
max_activity_entries=getattr(pmt_cfg, 'max_activity_entries', 30),
max_entry_length=getattr(pmt_cfg, 'max_entry_length', 500),
include_relation=getattr(pmt_cfg, 'include_relation', True),
include_memory=getattr(pmt_cfg, 'include_memory', True),
)
# 会话配置
if hasattr(kfc_cfg, 'session'):
sess_cfg = kfc_cfg.session
config.session = SessionConfig(
session_dir=getattr(sess_cfg, 'session_dir', "kokoro_flow_chatter/sessions"),
session_expire_seconds=getattr(sess_cfg, 'session_expire_seconds', 86400 * 7),
max_mental_log_entries=getattr(sess_cfg, 'max_mental_log_entries', 100),
)
# LLM 配置
if hasattr(kfc_cfg, 'llm'):
llm_cfg = kfc_cfg.llm
config.llm = LLMConfig(
model_name=getattr(llm_cfg, 'model_name', ""),
temperature=getattr(llm_cfg, 'temperature', 0.8),
max_tokens=getattr(llm_cfg, 'max_tokens', 1024),
timeout=getattr(llm_cfg, 'timeout', 60.0),
)
except Exception as e:
from src.common.logger import get_logger
logger = get_logger("kfc_config")
logger.warning(f"加载 KFC 配置失败,使用默认值: {e}")
return config
def reload_config() -> KokoroFlowChatterConfig:
"""重新加载配置"""
global _config
_config = load_config()
return _config
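# 使用示例(仅为示意,演示如何读取 KFC 配置;依赖项目内的 global_config,
# 字段名以上方各 dataclass 定义为准):
if __name__ == "__main__":
    cfg = get_config()
    print(
        f"enabled={cfg.enabled}, mode={cfg.mode.value}, "
        f"default_wait={cfg.waiting.default_max_wait_seconds}s, "
        f"proactive={cfg.proactive.enabled}"
    )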

View File

@@ -0,0 +1,350 @@
"""
Kokoro Flow Chatter 上下文构建器
为 KFC 提供完整的情境感知能力。
包含:
- 关系信息 (relation_info)
- 记忆块 (memory_block)
- 表达习惯 (expression_habits)
- 日程信息 (schedule)
- 时间信息 (time)
"""
import asyncio
import time
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.person_info.person_info import get_person_info_manager, PersonInfoManager
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
from src.common.data_models.message_manager_data_model import StreamContext
logger = get_logger("kfc_context_builder")
def _get_config():
"""获取全局配置(带类型断言)"""
assert global_config is not None, "global_config 未初始化"
return global_config
class KFCContextBuilder:
"""
KFC V2 上下文构建器
为提示词提供完整的情境感知数据。
"""
def __init__(self, chat_stream: "ChatStream"):
self.chat_stream = chat_stream
self.chat_id = chat_stream.stream_id
self.platform = chat_stream.platform
self.is_group_chat = bool(chat_stream.group_info)
async def build_all_context(
self,
sender_name: str,
target_message: str,
context: Optional["StreamContext"] = None,
user_id: Optional[str] = None,
) -> dict[str, str]:
"""
并行构建所有上下文模块
Args:
sender_name: 发送者名称
target_message: 目标消息内容
context: 聊天流上下文(可选)
user_id: 用户ID可选用于精确查找关系信息
Returns:
dict: 包含所有上下文块的字典
"""
chat_history = await self._get_chat_history_text(context)
tasks = {
"relation_info": self._build_relation_info(sender_name, target_message, user_id),
"memory_block": self._build_memory_block(chat_history, target_message),
"expression_habits": self._build_expression_habits(chat_history, target_message),
"schedule": self._build_schedule_block(),
"time": self._build_time_block(),
}
results = {}
try:
task_results = await asyncio.gather(
*[self._wrap_task(name, coro) for name, coro in tasks.items()],
return_exceptions=True
)
for result in task_results:
if isinstance(result, tuple):
name, value = result
results[name] = value
else:
logger.warning(f"上下文构建任务异常: {result}")
except Exception as e:
logger.error(f"并行构建上下文失败: {e}")
return results
async def _wrap_task(self, name: str, coro) -> tuple[str, str]:
"""包装任务以返回名称和结果"""
try:
result = await coro
return (name, result or "")
except Exception as e:
logger.error(f"构建 {name} 失败: {e}")
return (name, "")
async def _get_chat_history_text(
self,
context: Optional["StreamContext"] = None,
limit: int = 20,
) -> str:
"""获取聊天历史文本"""
if context is None:
return ""
try:
from src.chat.utils.chat_message_builder import build_readable_messages
messages = context.get_messages(limit=limit, include_unread=True)
if not messages:
return ""
msg_dicts = [msg.flatten() for msg in messages]
return await build_readable_messages(
msg_dicts,
replace_bot_name=True,
timestamp_mode="relative",
truncate=True,
)
except Exception as e:
logger.error(f"获取聊天历史失败: {e}")
return ""
async def _build_relation_info(self, sender_name: str, target_message: str, user_id: Optional[str] = None) -> str:
"""构建关系信息块"""
config = _get_config()
if sender_name == f"{config.bot.nickname}(你)":
return "你将要回复的是你自己发送的消息。"
person_info_manager = get_person_info_manager()
# 优先使用 user_id + platform 获取 person_id
person_id = None
if user_id and self.platform:
person_id = person_info_manager.get_person_id(self.platform, user_id)
logger.debug(f"通过 platform={self.platform}, user_id={user_id} 获取 person_id={person_id}")
# 如果没有找到,尝试通过 person_name 查找
if not person_id:
person_id = await person_info_manager.get_person_id_by_person_name(sender_name)
if not person_id:
logger.debug(f"未找到用户 {sender_name} 的 person_id")
return f"你与{sender_name}还没有建立深厚的关系,这是早期的互动阶段。"
try:
from src.person_info.relationship_fetcher import relationship_fetcher_manager
relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_id)
user_relation_info = await relationship_fetcher.build_relation_info(person_id, points_num=5)
stream_impression = await relationship_fetcher.build_chat_stream_impression(self.chat_id)
parts = []
if user_relation_info:
parts.append(f"### 你与 {sender_name} 的关系\n{user_relation_info}")
if stream_impression:
scene_type = "这个群" if self.is_group_chat else "你们的私聊"
parts.append(f"### 你对{scene_type}的印象\n{stream_impression}")
if parts:
return "\n\n".join(parts)
else:
return f"你与{sender_name}还没有建立深厚的关系,这是早期的互动阶段。"
except Exception as e:
logger.error(f"获取关系信息失败: {e}")
return f"你与{sender_name}是普通朋友关系。"
async def _build_memory_block(self, chat_history: str, target_message: str) -> str:
"""构建记忆块(使用三层记忆系统)"""
config = _get_config()
if not (config.memory and config.memory.enable):
return ""
try:
from src.memory_graph.manager_singleton import get_unified_memory_manager
from src.memory_graph.utils.three_tier_formatter import memory_formatter
unified_manager = get_unified_memory_manager()
if not unified_manager:
logger.debug("[三层记忆] 管理器未初始化")
return ""
search_result = await unified_manager.search_memories(
query_text=target_message,
use_judge=True,
recent_chat_history=chat_history,
)
if not search_result:
return ""
perceptual_blocks = search_result.get("perceptual_blocks", [])
short_term_memories = search_result.get("short_term_memories", [])
long_term_memories = search_result.get("long_term_memories", [])
formatted_memories = await memory_formatter.format_all_tiers(
perceptual_blocks=perceptual_blocks,
short_term_memories=short_term_memories,
long_term_memories=long_term_memories
)
total_count = len(perceptual_blocks) + len(short_term_memories) + len(long_term_memories)
if total_count > 0 and formatted_memories.strip():
logger.info(
f"[三层记忆] 检索到 {total_count} 条记忆 "
f"(感知:{len(perceptual_blocks)}, 短期:{len(short_term_memories)}, 长期:{len(long_term_memories)})"
)
return f"### 🧠 相关记忆\n\n{formatted_memories}"
return ""
except Exception as e:
logger.error(f"[三层记忆] 检索失败: {e}")
return ""
async def _build_expression_habits(self, chat_history: str, target_message: str) -> str:
"""构建表达习惯块"""
config = _get_config()
use_expression, _, _ = config.expression.get_expression_config_for_chat(self.chat_id)
if not use_expression:
return ""
try:
from src.chat.express.expression_selector import expression_selector
style_habits = []
grammar_habits = []
selected_expressions = await expression_selector.select_suitable_expressions(
chat_id=self.chat_id,
chat_history=chat_history,
target_message=target_message,
max_num=8,
min_num=2
)
if selected_expressions:
for expr in selected_expressions:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
expr_type = expr.get("type", "style")
habit_str = f"{expr['situation']}时,使用 {expr['style']}"
if expr_type == "grammar":
grammar_habits.append(habit_str)
else:
style_habits.append(habit_str)
parts = []
if style_habits:
parts.append("**语言风格习惯**\n" + "\n".join(f"- {h}" for h in style_habits))
if grammar_habits:
parts.append("**句法习惯**\n" + "\n".join(f"- {h}" for h in grammar_habits))
if parts:
return "### 💬 你的表达习惯\n\n" + "\n\n".join(parts)
return ""
except Exception as e:
logger.error(f"构建表达习惯失败: {e}")
return ""
async def _build_schedule_block(self) -> str:
"""构建日程信息块"""
config = _get_config()
if not config.planning_system.schedule_enable:
return ""
try:
from src.schedule.schedule_manager import schedule_manager
activity_info = schedule_manager.get_current_activity()
if not activity_info:
return ""
activity = activity_info.get("activity")
time_range = activity_info.get("time_range")
now = datetime.now()
if time_range:
try:
start_str, end_str = time_range.split("-")
start_time = datetime.strptime(start_str.strip(), "%H:%M").replace(
year=now.year, month=now.month, day=now.day
)
end_time = datetime.strptime(end_str.strip(), "%H:%M").replace(
year=now.year, month=now.month, day=now.day
)
if end_time < start_time:
end_time += timedelta(days=1)
if now < start_time:
now += timedelta(days=1)
duration_minutes = (now - start_time).total_seconds() / 60
remaining_minutes = (end_time - now).total_seconds() / 60
return (
f"你当前正在「{activity}」,"
f"{start_time.strftime('%H:%M')}开始,预计{end_time.strftime('%H:%M')}结束,"
f"已进行{duration_minutes:.0f}分钟,还剩约{remaining_minutes:.0f}分钟。"
)
except (ValueError, AttributeError):
pass
return f"你当前正在「{activity}"
except Exception as e:
logger.error(f"构建日程块失败: {e}")
return ""
async def _build_time_block(self) -> str:
"""构建时间信息块"""
now = datetime.now()
weekdays = ["周一", "周二", "周三", "周四", "周五", "周六", "周日"]
weekday = weekdays[now.weekday()]
return f"{now.strftime('%Y年%m月%d')} {weekday} {now.strftime('%H:%M:%S')}"
async def build_kfc_context(
chat_stream: "ChatStream",
sender_name: str,
target_message: str,
context: Optional["StreamContext"] = None,
user_id: Optional[str] = None,
) -> dict[str, str]:
"""
便捷函数构建KFC所需的所有上下文
"""
builder = KFCContextBuilder(chat_stream)
return await builder.build_all_context(sender_name, target_message, context, user_id)
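# 使用示例(仅为示意,需在项目的异步环境中调用;chat_stream 应为真实的 ChatStream 实例,
# sender_name / target_message / user_id 取值为虚构):
async def _demo_build_context(chat_stream) -> None:
    ctx = await build_kfc_context(
        chat_stream,
        sender_name="小明",
        target_message="在吗?",
        user_id="10001",
    )
    # ctx 的键与 build_all_context 中的 tasks 一致:
    # relation_info / memory_block / expression_habits / schedule / time
    logger.debug(f"KFC context keys: {list(ctx.keys())}")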
__all__ = [
"KFCContextBuilder",
"build_kfc_context",
]

View File

@@ -0,0 +1,320 @@
"""
Kokoro Flow Chatter - 数据模型
定义核心数据结构:
- EventType: 活动流事件类型
- SessionStatus: 会话状态(仅 IDLE 和 WAITING
- MentalLogEntry: 心理活动日志条目
- WaitingConfig: 等待配置
- ActionModel: 动作模型
- LLMResponse: LLM 响应结构
"""
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
import time
class EventType(Enum):
"""
活动流事件类型
用于标记 mental_log 中不同类型的事件,
每种类型对应一个提示词小模板
"""
# 用户相关
USER_MESSAGE = "user_message" # 用户发送消息
# Bot 行动相关
BOT_PLANNING = "bot_planning" # Bot 规划thought + actions
# 等待相关
WAITING_START = "waiting_start" # 开始等待
WAITING_UPDATE = "waiting_update" # 等待期间心理变化
REPLY_RECEIVED_IN_TIME = "reply_in_time" # 在预期内收到回复
REPLY_RECEIVED_LATE = "reply_late" # 超出预期收到回复
WAIT_TIMEOUT = "wait_timeout" # 等待超时
# 主动思考相关
PROACTIVE_TRIGGER = "proactive_trigger" # 主动思考触发(长期沉默)
def __str__(self) -> str:
return self.value
class SessionStatus(Enum):
"""
会话状态
极简设计,只有两种稳定状态:
- IDLE: 空闲,没有期待回复
- WAITING: 等待对方回复中
"""
IDLE = "idle"
WAITING = "waiting"
def __str__(self) -> str:
return self.value
@dataclass
class WaitingConfig:
"""
等待配置
当 Bot 发送消息后设置的等待参数
"""
expected_reaction: str = "" # 期望对方如何回应
max_wait_seconds: int = 0 # 最长等待时间0 表示不等待
started_at: float = 0.0 # 开始等待的时间戳
last_thinking_at: float = 0.0 # 上次连续思考的时间戳
thinking_count: int = 0 # 连续思考次数
def is_active(self) -> bool:
"""是否正在等待"""
return self.max_wait_seconds > 0 and self.started_at > 0
def get_elapsed_seconds(self) -> float:
"""获取已等待时间(秒)"""
if not self.is_active():
return 0.0
return time.time() - self.started_at
def get_elapsed_minutes(self) -> float:
"""获取已等待时间(分钟)"""
return self.get_elapsed_seconds() / 60
def is_timeout(self) -> bool:
"""是否已超时"""
if not self.is_active():
return False
return self.get_elapsed_seconds() >= self.max_wait_seconds
def get_progress(self) -> float:
"""获取等待进度 (0.0 - 1.0)"""
if not self.is_active() or self.max_wait_seconds <= 0:
return 0.0
return min(self.get_elapsed_seconds() / self.max_wait_seconds, 1.0)
def to_dict(self) -> dict[str, Any]:
return {
"expected_reaction": self.expected_reaction,
"max_wait_seconds": self.max_wait_seconds,
"started_at": self.started_at,
"last_thinking_at": self.last_thinking_at,
"thinking_count": self.thinking_count,
}
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "WaitingConfig":
return cls(
expected_reaction=data.get("expected_reaction", ""),
max_wait_seconds=data.get("max_wait_seconds", 0),
started_at=data.get("started_at", 0.0),
last_thinking_at=data.get("last_thinking_at", 0.0),
thinking_count=data.get("thinking_count", 0),
)
def reset(self) -> None:
"""重置等待配置"""
self.expected_reaction = ""
self.max_wait_seconds = 0
self.started_at = 0.0
self.last_thinking_at = 0.0
self.thinking_count = 0
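# 示例(仅为示意):max_wait_seconds=600、started_at 为 5 分钟前的配置,
# get_progress() 约为 0.5、is_timeout() 为 False;已等待超过 600 秒后 is_timeout() 变为 True,
# 此时 get_progress() 被钳制在 1.0。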
@dataclass
class MentalLogEntry:
"""
心理活动日志条目
记录活动流中的每一个事件节点,
用于构建线性叙事风格的提示词
"""
event_type: EventType
timestamp: float
# 通用字段
content: str = "" # 事件内容(消息文本、动作描述等)
# 用户消息相关
user_name: str = "" # 发送者名称
user_id: str = "" # 发送者 ID
# Bot 规划相关
thought: str = "" # 内心想法
actions: list[dict] = field(default_factory=list) # 执行的动作列表
expected_reaction: str = "" # 期望的回应
max_wait_seconds: int = 0 # 设定的等待时间
# 等待相关
elapsed_seconds: float = 0.0 # 已等待时间
waiting_thought: str = "" # 等待期间的想法
mood: str = "" # 当前心情
# 元数据
metadata: dict[str, Any] = field(default_factory=dict)
def to_dict(self) -> dict[str, Any]:
return {
"event_type": str(self.event_type),
"timestamp": self.timestamp,
"content": self.content,
"user_name": self.user_name,
"user_id": self.user_id,
"thought": self.thought,
"actions": self.actions,
"expected_reaction": self.expected_reaction,
"max_wait_seconds": self.max_wait_seconds,
"elapsed_seconds": self.elapsed_seconds,
"waiting_thought": self.waiting_thought,
"mood": self.mood,
"metadata": self.metadata,
}
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "MentalLogEntry":
event_type_str = data.get("event_type", "user_message")
try:
event_type = EventType(event_type_str)
except ValueError:
event_type = EventType.USER_MESSAGE
return cls(
event_type=event_type,
timestamp=data.get("timestamp", time.time()),
content=data.get("content", ""),
user_name=data.get("user_name", ""),
user_id=data.get("user_id", ""),
thought=data.get("thought", ""),
actions=data.get("actions", []),
expected_reaction=data.get("expected_reaction", ""),
max_wait_seconds=data.get("max_wait_seconds", 0),
elapsed_seconds=data.get("elapsed_seconds", 0.0),
waiting_thought=data.get("waiting_thought", ""),
mood=data.get("mood", ""),
metadata=data.get("metadata", {}),
)
def get_time_str(self, format: str = "%H:%M") -> str:
"""获取格式化的时间字符串"""
return time.strftime(format, time.localtime(self.timestamp))
@dataclass
class ActionModel:
"""
动作模型
表示 LLM 决策的单个动作
"""
type: str # 动作类型
params: dict[str, Any] = field(default_factory=dict) # 动作参数
reason: str = "" # 选择该动作的理由
def to_dict(self) -> dict[str, Any]:
result = {"type": self.type}
if self.reason:
result["reason"] = self.reason
result.update(self.params)
return result
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "ActionModel":
action_type = data.get("type", "do_nothing")
reason = data.get("reason", "")
params = {k: v for k, v in data.items() if k not in ("type", "reason")}
return cls(type=action_type, params=params, reason=reason)
def get_description(self) -> str:
"""获取动作的文字描述"""
if self.type == "kfc_reply":
content = self.params.get("content", "")
return f'发送消息:"{content[:50]}{"..." if len(content) > 50 else ""}"'
elif self.type == "poke_user":
return "戳了戳对方"
elif self.type == "do_nothing":
return "什么都没做"
elif self.type == "send_emoji":
emoji = self.params.get("emoji", "")
return f"发送表情:{emoji}"
else:
return f"执行动作:{self.type}"
@dataclass
class LLMResponse:
"""
LLM 响应结构
定义 LLM 输出的 JSON 格式
"""
thought: str # 内心想法
actions: list[ActionModel] # 动作列表
expected_reaction: str = "" # 期望对方的回应
max_wait_seconds: int = 0 # 最长等待时间0 = 不等待)
# 可选字段
mood: str = "" # 当前心情
def to_dict(self) -> dict[str, Any]:
return {
"thought": self.thought,
"actions": [a.to_dict() for a in self.actions],
"expected_reaction": self.expected_reaction,
"max_wait_seconds": self.max_wait_seconds,
"mood": self.mood,
}
@classmethod
def from_dict(cls, data: dict[str, Any]) -> "LLMResponse":
actions_data = data.get("actions", [])
actions = [ActionModel.from_dict(a) for a in actions_data] if actions_data else []
# 如果没有动作,添加默认的 do_nothing
if not actions:
actions = [ActionModel(type="do_nothing")]
# 处理 max_wait_seconds确保在合理范围内
max_wait = data.get("max_wait_seconds", 0)
try:
max_wait = int(max_wait)
max_wait = max(0, min(max_wait, 1800)) # 0-30分钟
except (ValueError, TypeError):
max_wait = 0
return cls(
thought=data.get("thought", ""),
actions=actions,
expected_reaction=data.get("expected_reaction", ""),
max_wait_seconds=max_wait,
mood=data.get("mood", ""),
)
@classmethod
def create_error_response(cls, error_message: str) -> "LLMResponse":
"""创建错误响应"""
return cls(
thought=f"出现了问题:{error_message}",
actions=[ActionModel(type="do_nothing")],
expected_reaction="",
max_wait_seconds=0,
)
def has_reply(self) -> bool:
"""是否包含回复动作"""
return any(a.type in ("kfc_reply", "respond") for a in self.actions)
def get_reply_content(self) -> str:
"""获取回复内容"""
for action in self.actions:
if action.type in ("kfc_reply", "respond"):
return action.params.get("content", "")
return ""
def get_actions_description(self) -> str:
"""获取所有动作的文字描述"""
descriptions = [a.get_description() for a in self.actions]
return " + ".join(descriptions)

View File

@@ -0,0 +1,113 @@
"""
Kokoro Flow Chatter - Planner
规划器:负责分析情境并生成行动计划
- 输入:会话状态、用户消息、情境类型
- 输出LLMResponse包含 thought、actions、expected_reaction、max_wait_seconds
- 不负责生成具体回复文本,只决定"要做什么"
"""
from typing import TYPE_CHECKING, Optional
from src.common.logger import get_logger
from src.plugin_system.apis import llm_api
from src.utils.json_parser import extract_and_parse_json
from .models import LLMResponse
from .prompt.builder import get_prompt_builder
from .session import KokoroSession
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("kfc_planner")
async def generate_plan(
session: KokoroSession,
user_name: str,
situation_type: str = "new_message",
chat_stream: Optional["ChatStream"] = None,
available_actions: Optional[dict] = None,
extra_context: Optional[dict] = None,
) -> LLMResponse:
"""
生成行动计划
Args:
session: 会话对象
user_name: 用户名称
situation_type: 情况类型
chat_stream: 聊天流对象
available_actions: 可用动作字典
extra_context: 额外上下文
Returns:
LLMResponse 对象,包含计划信息
"""
try:
# 1. 构建规划器提示词
prompt_builder = get_prompt_builder()
prompt = await prompt_builder.build_planner_prompt(
session=session,
user_name=user_name,
situation_type=situation_type,
chat_stream=chat_stream,
available_actions=available_actions,
extra_context=extra_context,
)
from src.config.config import global_config
if global_config and global_config.debug.show_prompt:
logger.info(f"[KFC Planner] 生成的规划提示词:\n{prompt}")
# 2. 获取 planner 模型配置并调用 LLM
models = llm_api.get_available_models()
planner_config = models.get("planner")
if not planner_config:
logger.error("[KFC Planner] 未找到 planner 模型配置")
return LLMResponse.create_error_response("未找到 planner 模型配置")
success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
prompt=prompt,
model_config=planner_config,
request_type="kokoro_flow_chatter.plan",
)
if not success:
logger.error(f"[KFC Planner] LLM 调用失败: {raw_response}")
return LLMResponse.create_error_response(raw_response)
logger.debug(f"[KFC Planner] LLM 响应 (model={model_name}):\n{raw_response}")
# 3. 解析响应
return _parse_response(raw_response)
except Exception as e:
logger.error(f"[KFC Planner] 生成失败: {e}")
import traceback
traceback.print_exc()
return LLMResponse.create_error_response(str(e))
def _parse_response(raw_response: str) -> LLMResponse:
"""解析 LLM 响应"""
data = extract_and_parse_json(raw_response, strict=False)
if not data or not isinstance(data, dict):
logger.warning(f"[KFC Planner] 无法解析 JSON: {raw_response[:200]}...")
return LLMResponse.create_error_response("无法解析响应格式")
response = LLMResponse.from_dict(data)
if response.thought:
# 使用 logger 输出美化日志(颜色通过 logger 系统配置)
logger.info(f"💭 {response.thought}")
actions_str = ", ".join(a.type for a in response.actions)
logger.debug(f"actions={actions_str}")
else:
logger.warning("响应缺少 thought")
return response
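# 使用示例(仅为示意,演示 Planner 输出的解析流程;JSON 字符串为虚构示例,
# 假设项目内的 extract_and_parse_json 可以解析纯 JSON 文本):
if __name__ == "__main__":
    raw = (
        '{"thought": "她发了新消息,先简单回应一下", '
        '"actions": [{"type": "kfc_reply"}], '
        '"expected_reaction": "她继续聊下去", "max_wait_seconds": 300}'
    )
    plan = _parse_response(raw)
    print(plan.thought, [a.type for a in plan.actions], plan.max_wait_seconds)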

View File

@@ -0,0 +1,117 @@
"""
Kokoro Flow Chatter - 插件注册
注册 Chatter
"""
from typing import Any, ClassVar
from src.common.logger import get_logger
from src.plugin_system.base.base_plugin import BasePlugin
from src.plugin_system.base.component_types import ChatterInfo
from src.plugin_system import register_plugin
from .chatter import KokoroFlowChatter
from .config import get_config
from .proactive_thinker import start_proactive_thinker, stop_proactive_thinker
logger = get_logger("kfc_plugin")
@register_plugin
class KokoroFlowChatterPlugin(BasePlugin):
"""
Kokoro Flow Chatter 插件
专为私聊设计的增强 Chatter
- 线性叙事提示词架构
- 等待机制与心理状态演变
- 主动思考能力
"""
plugin_name: str = "kokoro_flow_chatter"
enable_plugin: bool = True
plugin_priority: int = 50 # 高于默认 Chatter
dependencies: ClassVar[list[str]] = []
python_dependencies: ClassVar[list[str]] = []
config_file_name: str = "config.toml"
# 状态
_is_started: bool = False
async def on_plugin_loaded(self):
"""插件加载时"""
config = get_config()
if not config.enabled:
logger.info("[KFC] 插件已禁用")
return
logger.info("[KFC] 插件已加载")
# 启动主动思考器
if config.proactive.enabled:
try:
await start_proactive_thinker()
logger.info("[KFC] 主动思考器已启动")
self._is_started = True
except Exception as e:
logger.error(f"[KFC] 启动主动思考器失败: {e}")
async def on_plugin_unloaded(self):
"""插件卸载时"""
try:
await stop_proactive_thinker()
logger.info("[KFC] 主动思考器已停止")
self._is_started = False
except Exception as e:
logger.warning(f"[KFC] 停止主动思考器失败: {e}")
def get_plugin_components(self):
"""返回组件列表"""
config = get_config()
if not config.enabled:
return []
components = []
try:
# 注册 Chatter
components.append((
KokoroFlowChatter.get_chatter_info(),
KokoroFlowChatter,
))
logger.debug("[KFC] 成功加载 KokoroFlowChatter 组件")
except Exception as e:
logger.error(f"[KFC] 加载 Chatter 组件失败: {e}")
try:
# 注册 KFC 专属 Reply 动作
from .actions.reply import KFCReplyAction
components.append((
KFCReplyAction.get_action_info(),
KFCReplyAction,
))
logger.debug("[KFC] 成功加载 KFCReplyAction 组件")
except Exception as e:
logger.error(f"[KFC] 加载 Reply 动作失败: {e}")
return components
def get_plugin_info(self) -> dict[str, Any]:
"""获取插件信息"""
return {
"name": self.plugin_name,
"display_name": "Kokoro Flow Chatter",
"version": "2.0.0",
"author": "MoFox",
"description": "专为私聊设计的增强 Chatter",
"features": [
"线性叙事提示词架构",
"心理活动流记录",
"等待机制与超时处理",
"主动思考能力",
],
}

View File

@@ -0,0 +1,788 @@
"""
Kokoro Flow Chatter - 主动思考器
独立组件,负责:
1. 等待期间的连续思考(更新心理状态)
2. 等待超时决策(继续等 or 做点什么)
3. 长期沉默后主动发起对话
通过 UnifiedScheduler 定期触发,与 Chatter 解耦
支持两种工作模式(与 Chatter 保持一致):
- unified: 单次 LLM 调用完成思考和回复
- split: Planner + Replyer 两次 LLM 调用
"""
import asyncio
import random
import time
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional
from src.chat.planner_actions.action_manager import ChatterActionManager
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.apis.unified_scheduler import TriggerType, unified_scheduler
from .config import KFCMode, get_config
from .models import EventType, SessionStatus
from .session import KokoroSession, get_session_manager
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("kfc_proactive_thinker")
class ProactiveThinker:
"""
主动思考器
独立于 Chatter负责处理
1. 等待期间的连续思考
2. 等待超时
3. 长期沉默后主动发起
核心逻辑:
- 定期检查所有 WAITING 状态的 Session
- 触发连续思考或超时决策
- 定期检查长期沉默的 Session考虑主动发起
支持两种工作模式(与 Chatter 保持一致):
- unified: 单次 LLM 调用
- split: Planner + Replyer 两次调用
"""
# 连续思考触发点(等待进度百分比)
THINKING_TRIGGERS = [0.3, 0.6, 0.85]
# 任务名称
TASK_WAITING_CHECK = "kfc_waiting_check"
TASK_PROACTIVE_CHECK = "kfc_proactive_check"
def __init__(self):
self.session_manager = get_session_manager()
# 配置
self._load_config()
# 调度任务 ID
self._waiting_schedule_id: Optional[str] = None
self._proactive_schedule_id: Optional[str] = None
self._running = False
# 统计
self._stats = {
"waiting_checks": 0,
"continuous_thinking_triggered": 0,
"timeout_decisions": 0,
"proactive_triggered": 0,
}
def _load_config(self) -> None:
"""加载配置 - 使用统一的配置系统"""
config = get_config()
proactive_cfg = config.proactive
# 工作模式
self._mode = config.mode
# 等待检查间隔(秒)
self.waiting_check_interval = 15.0
# 主动思考检查间隔(秒)
self.proactive_check_interval = 300.0
# 从配置读取主动思考相关设置
self.proactive_enabled = proactive_cfg.enabled
self.silence_threshold = proactive_cfg.silence_threshold_seconds
self.min_proactive_interval = proactive_cfg.min_interval_between_proactive
self.quiet_hours_start = proactive_cfg.quiet_hours_start
self.quiet_hours_end = proactive_cfg.quiet_hours_end
self.trigger_probability = proactive_cfg.trigger_probability
self.min_affinity_for_proactive = proactive_cfg.min_affinity_for_proactive
async def start(self) -> None:
"""启动主动思考器"""
if self._running:
logger.info("已在运行中")
return
self._running = True
# 注册等待检查任务(始终启用,用于处理等待中的 Session
self._waiting_schedule_id = await unified_scheduler.create_schedule(
callback=self._check_waiting_sessions,
trigger_type=TriggerType.TIME,
trigger_config={"delay_seconds": self.waiting_check_interval},
is_recurring=True,
task_name=self.TASK_WAITING_CHECK,
force_overwrite=True,
timeout=60.0,
)
# 注册主动思考检查任务(仅在启用时注册)
if self.proactive_enabled:
self._proactive_schedule_id = await unified_scheduler.create_schedule(
callback=self._check_proactive_sessions,
trigger_type=TriggerType.TIME,
trigger_config={"delay_seconds": self.proactive_check_interval},
is_recurring=True,
task_name=self.TASK_PROACTIVE_CHECK,
force_overwrite=True,
timeout=120.0,
)
logger.info("[ProactiveThinker] 已启动(主动思考已启用)")
else:
logger.info("[ProactiveThinker] 已启动(主动思考已禁用)")
async def stop(self) -> None:
"""停止主动思考器"""
if not self._running:
return
self._running = False
if self._waiting_schedule_id:
await unified_scheduler.remove_schedule(self._waiting_schedule_id)
if self._proactive_schedule_id:
await unified_scheduler.remove_schedule(self._proactive_schedule_id)
logger.info("[ProactiveThinker] 已停止")
# ========================
# 等待检查
# ========================
async def _check_waiting_sessions(self) -> None:
"""检查所有等待中的 Session"""
self._stats["waiting_checks"] += 1
sessions = await self.session_manager.get_waiting_sessions()
if not sessions:
return
# 并行处理
tasks = [
asyncio.create_task(self._process_waiting_session(s))
for s in sessions
]
await asyncio.gather(*tasks, return_exceptions=True)
async def _process_waiting_session(self, session: KokoroSession) -> None:
"""处理单个等待中的 Session"""
try:
if session.status != SessionStatus.WAITING:
return
if not session.waiting_config.is_active():
return
# 防止与 Chatter 并发处理:如果 Session 刚刚被更新5秒内跳过
# 这样可以避免 Chatter 正在处理时ProactiveThinker 也开始处理
time_since_last_activity = time.time() - session.last_activity_at
if time_since_last_activity < 5:
logger.debug(
f"[ProactiveThinker] Session {session.user_id} 刚有活动 "
f"({time_since_last_activity:.1f}s ago),跳过处理"
)
return
# 检查是否超时
if session.waiting_config.is_timeout():
await self._handle_timeout(session)
return
# 检查是否需要触发连续思考
progress = session.waiting_config.get_progress()
if self._should_trigger_thinking(session, progress):
await self._handle_continuous_thinking(session, progress)
except Exception as e:
logger.error(f"[ProactiveThinker] 处理等待 Session 失败 {session.user_id}: {e}")
def _should_trigger_thinking(self, session: KokoroSession, progress: float) -> bool:
"""判断是否应触发连续思考"""
# 计算应该触发的次数
expected_count = sum(1 for t in self.THINKING_TRIGGERS if progress >= t)
if session.waiting_config.thinking_count >= expected_count:
return False
# 确保两次思考之间有间隔
if session.waiting_config.last_thinking_at > 0:
elapsed = time.time() - session.waiting_config.last_thinking_at
if elapsed < 30: # 至少 30 秒间隔
return False
return True
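# 示例(仅为示意):THINKING_TRIGGERS = [0.3, 0.6, 0.85] 时,
# progress=0.65 -> expected_count=2,若 thinking_count=1 且距上次思考已超过 30 秒,则触发第二次思考;
# progress=0.25 -> expected_count=0,不触发。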
async def _handle_continuous_thinking(
self,
session: KokoroSession,
progress: float,
) -> None:
"""处理连续思考"""
self._stats["continuous_thinking_triggered"] += 1
# 获取用户名
user_name = await self._get_user_name(session.user_id, session.stream_id)
# 调用 LLM 生成等待中的想法
thought = await self._generate_waiting_thought(session, user_name, progress)
# 记录到 mental_log
session.add_waiting_update(
waiting_thought=thought,
mood="", # 心情已融入 thought 中
)
# 更新思考计数
session.waiting_config.thinking_count += 1
session.waiting_config.last_thinking_at = time.time()
# 保存
await self.session_manager.save_session(session.user_id)
logger.debug(
f"[ProactiveThinker] 连续思考: user={session.user_id}, "
f"progress={progress:.1%}, thought={thought[:30]}..."
)
async def _generate_waiting_thought(
self,
session: KokoroSession,
user_name: str,
progress: float,
) -> str:
"""调用 LLM 生成等待中的想法"""
try:
from src.chat.utils.prompt import global_prompt_manager
from src.plugin_system.apis import llm_api
from .prompt.builder import get_prompt_builder
from .prompt.prompts import PROMPT_NAMES
# 使用 PromptBuilder 构建人设块
prompt_builder = get_prompt_builder()
persona_block = prompt_builder._build_persona_block()
# 获取关系信息
relation_block = f"你与 {user_name} 还不太熟悉。"
try:
from src.person_info.relationship_manager import relationship_manager
person_info_manager = await self._get_person_info_manager()
if person_info_manager:
platform = global_config.bot.platform if global_config else "qq"
person_id = person_info_manager.get_person_id(platform, session.user_id)
relationship = await relationship_manager.get_relationship(person_id)
if relationship:
relation_block = f"你与 {user_name} 的亲密度是 {relationship.intimacy}{relationship.description or ''}"
except Exception as e:
logger.debug(f"获取关系信息失败: {e}")
# 获取上次发送的消息
last_bot_message = "(未知)"
for entry in reversed(session.mental_log):
if entry.event_type == EventType.BOT_PLANNING and entry.actions:
for action in entry.actions:
if action.get("type") == "kfc_reply":
content = action.get("content", "")
if content:
last_bot_message = content[:100] + ("..." if len(content) > 100 else "")
break
if last_bot_message != "(未知)":
break
# 构建提示词
elapsed_minutes = session.waiting_config.get_elapsed_minutes()
max_wait_minutes = session.waiting_config.max_wait_seconds / 60
expected_reaction = session.waiting_config.expected_reaction or "对方能回复点什么"
prompt = await global_prompt_manager.format_prompt(
PROMPT_NAMES["waiting_thought"],
persona_block=persona_block,
user_name=user_name,
relation_block=relation_block,
last_bot_message=last_bot_message,
expected_reaction=expected_reaction,
elapsed_minutes=elapsed_minutes,
max_wait_minutes=max_wait_minutes,
progress_percent=int(progress * 100),
)
# 调用情绪模型
models = llm_api.get_available_models()
emotion_config = models.get("emotion") or models.get("replyer")
if not emotion_config:
logger.warning("[ProactiveThinker] 未找到 emotion/replyer 模型配置,使用默认想法")
return self._get_fallback_thought(elapsed_minutes, progress)
success, raw_response, _, model_name = await llm_api.generate_with_model(
prompt=prompt,
model_config=emotion_config,
request_type="kokoro_flow_chatter.waiting_thought",
)
if not success or not raw_response:
logger.warning(f"[ProactiveThinker] LLM 调用失败: {raw_response}")
return self._get_fallback_thought(elapsed_minutes, progress)
# 使用统一的文本清理函数
from .replyer import _clean_reply_text
thought = _clean_reply_text(raw_response)
logger.debug(f"[ProactiveThinker] LLM 生成等待想法 (model={model_name}): {thought[:50]}...")
return thought
except Exception as e:
logger.error(f"[ProactiveThinker] 生成等待想法失败: {e}")
import traceback
traceback.print_exc()
return self._get_fallback_thought(
session.waiting_config.get_elapsed_minutes(),
progress
)
def _get_fallback_thought(self, elapsed_minutes: float, progress: float) -> str:
"""获取备用的等待想法(当 LLM 调用失败时使用)"""
if progress < 0.4:
thoughts = [
f"已经等了 {elapsed_minutes:.0f} 分钟了,对方可能在忙吧...",
"不知道对方在做什么呢",
"再等等看吧",
]
elif progress < 0.7:
thoughts = [
f"等了 {elapsed_minutes:.0f} 分钟了,有点担心...",
"对方是不是忘记回复了?",
"嗯...还是没有消息",
]
else:
thoughts = [
f"已经等了 {elapsed_minutes:.0f} 分钟了,感觉有点焦虑",
"要不要主动说点什么呢...",
"快到时间了,对方还是没回",
]
return random.choice(thoughts)
async def _get_person_info_manager(self):
"""获取 person_info_manager"""
try:
from src.person_info.person_info import get_person_info_manager
return get_person_info_manager()
except Exception:
return None
async def _handle_timeout(self, session: KokoroSession) -> None:
"""处理等待超时 - 支持双模式"""
self._stats["timeout_decisions"] += 1
# 再次检查 Session 状态,防止在等待过程中被 Chatter 处理
if session.status != SessionStatus.WAITING:
logger.debug(f"[ProactiveThinker] Session {session.user_id} 已不在等待状态,跳过超时处理")
return
# 再次检查最近活动时间
time_since_last_activity = time.time() - session.last_activity_at
if time_since_last_activity < 5:
logger.debug(
f"[ProactiveThinker] Session {session.user_id} 刚有活动,跳过超时处理"
)
return
# 增加连续超时计数
session.consecutive_timeout_count += 1
logger.info(
f"[ProactiveThinker] 等待超时: user={session.user_id}, "
f"consecutive_timeout={session.consecutive_timeout_count}"
)
try:
# 获取用户名
user_name = await self._get_user_name(session.user_id, session.stream_id)
# 获取聊天流
chat_stream = await self._get_chat_stream(session.stream_id)
# 加载动作
action_manager = ChatterActionManager()
await action_manager.load_actions(session.stream_id)
# 通过 ActionModifier 过滤动作
from src.chat.planner_actions.action_modifier import ActionModifier
action_modifier = ActionModifier(action_manager, session.stream_id)
await action_modifier.modify_actions(chatter_name="KokoroFlowChatter")
# 计算用户最后回复距今的时间
time_since_user_reply = None
if session.last_user_message_at:
time_since_user_reply = time.time() - session.last_user_message_at
# 构建超时上下文信息
extra_context = {
"consecutive_timeout_count": session.consecutive_timeout_count,
"time_since_user_reply": time_since_user_reply,
"time_since_user_reply_str": self._format_duration(time_since_user_reply) if time_since_user_reply else "未知",
}
# 根据模式选择生成方式
if self._mode == KFCMode.UNIFIED:
# 统一模式:单次 LLM 调用
from .unified import generate_unified_response
plan_response = await generate_unified_response(
session=session,
user_name=user_name,
situation_type="timeout",
chat_stream=chat_stream,
available_actions=action_manager.get_using_actions(),
)
else:
# 分离模式Planner + Replyer
from .planner import generate_plan
plan_response = await generate_plan(
session=session,
user_name=user_name,
situation_type="timeout",
chat_stream=chat_stream,
available_actions=action_manager.get_using_actions(),
extra_context=extra_context,
)
# 分离模式下需要注入上下文信息
for action in plan_response.actions:
if action.type == "kfc_reply":
action.params["user_id"] = session.user_id
action.params["user_name"] = user_name
action.params["thought"] = plan_response.thought
action.params["situation_type"] = "timeout"
action.params["extra_context"] = extra_context
# 执行动作(回复生成在 Action.execute() 中完成)
for action in plan_response.actions:
await action_manager.execute_action(
action_name=action.type,
chat_id=session.stream_id,
target_message=None,
reasoning=plan_response.thought,
action_data=action.params,
thinking_id=None,
log_prefix="[KFC ProactiveThinker]",
)
# 记录到 mental_log
session.add_bot_planning(
thought=plan_response.thought,
actions=[a.to_dict() for a in plan_response.actions],
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
# 更新状态
if plan_response.max_wait_seconds > 0:
# 继续等待
session.start_waiting(
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
else:
# 不再等待
session.end_waiting()
# 保存
await self.session_manager.save_session(session.user_id)
logger.info(
f"[ProactiveThinker] 超时决策完成: user={session.user_id}, "
f"actions={[a.type for a in plan_response.actions]}, "
f"continue_wait={plan_response.max_wait_seconds > 0}, "
f"consecutive_timeout={session.consecutive_timeout_count}"
)
except Exception as e:
logger.error(f"[ProactiveThinker] 处理超时失败: {e}")
# 出错时结束等待
session.end_waiting()
await self.session_manager.save_session(session.user_id)
# ========================
# 主动思考(长期沉默)
# ========================
async def _check_proactive_sessions(self) -> None:
"""检查是否有需要主动发起对话的 Session"""
# 检查是否在勿扰时段
if self._is_quiet_hours():
return
sessions = await self.session_manager.get_all_sessions()
current_time = time.time()
for session in sessions:
try:
trigger_reason = self._should_trigger_proactive(session, current_time)
if trigger_reason:
await self._handle_proactive(session, trigger_reason)
except Exception as e:
logger.error(f"[ProactiveThinker] 检查主动思考失败 {session.user_id}: {e}")
def _is_quiet_hours(self) -> bool:
"""检查是否在勿扰时段"""
try:
now = datetime.now()
current_minutes = now.hour * 60 + now.minute
start_parts = self.quiet_hours_start.split(":")
start_minutes = int(start_parts[0]) * 60 + int(start_parts[1])
end_parts = self.quiet_hours_end.split(":")
end_minutes = int(end_parts[0]) * 60 + int(end_parts[1])
if start_minutes <= end_minutes:
return start_minutes <= current_minutes < end_minutes
else:
return current_minutes >= start_minutes or current_minutes < end_minutes
except Exception:
return False
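# 示例(仅为示意):quiet_hours_start="23:00"、quiet_hours_end="07:00" 为跨午夜区间,
# 00:30(30 分钟)与 23:30(1410 分钟)均返回 True,12:00(720 分钟)返回 False。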
def _should_trigger_proactive(
self,
session: KokoroSession,
current_time: float,
) -> Optional[str]:
"""判断是否应触发主动思考"""
# 只检查 IDLE 状态的 Session
if session.status != SessionStatus.IDLE:
return None
# 检查沉默时长
silence_duration = current_time - session.last_activity_at
if silence_duration < self.silence_threshold:
return None
# 检查距离上次主动思考的间隔
if session.last_proactive_at:
time_since_last = current_time - session.last_proactive_at
if time_since_last < self.min_proactive_interval:
return None
# 概率触发(避免每次检查都触发)
if random.random() > self.trigger_probability:
return None
silence_hours = silence_duration / 3600
return f"沉默了 {silence_hours:.1f} 小时"
async def _handle_proactive(
self,
session: KokoroSession,
trigger_reason: str,
) -> None:
"""处理主动思考 - 支持双模式"""
self._stats["proactive_triggered"] += 1
# 再次检查最近活动时间,防止与 Chatter 并发
time_since_last_activity = time.time() - session.last_activity_at
if time_since_last_activity < 5:
logger.debug(
f"[ProactiveThinker] Session {session.user_id} 刚有活动,跳过主动思考"
)
return
logger.info(f"主动思考触发: user={session.user_id}, reason={trigger_reason}")
try:
# 获取用户名
user_name = await self._get_user_name(session.user_id, session.stream_id)
# 获取聊天流
chat_stream = await self._get_chat_stream(session.stream_id)
# 加载动作
action_manager = ChatterActionManager()
await action_manager.load_actions(session.stream_id)
# 通过 ActionModifier 过滤动作
from src.chat.planner_actions.action_modifier import ActionModifier
action_modifier = ActionModifier(action_manager, session.stream_id)
await action_modifier.modify_actions(chatter_name="KokoroFlowChatter")
# 计算沉默时长
silence_seconds = time.time() - session.last_activity_at
if silence_seconds < 3600:
silence_duration = f"{silence_seconds / 60:.0f} 分钟"
else:
silence_duration = f"{silence_seconds / 3600:.1f} 小时"
extra_context = {
"trigger_reason": trigger_reason,
"silence_duration": silence_duration,
}
# 根据模式选择生成方式
if self._mode == KFCMode.UNIFIED:
# 统一模式:单次 LLM 调用
from .unified import generate_unified_response
plan_response = await generate_unified_response(
session=session,
user_name=user_name,
situation_type="proactive",
chat_stream=chat_stream,
available_actions=action_manager.get_using_actions(),
extra_context=extra_context,
)
else:
# 分离模式Planner + Replyer
from .planner import generate_plan
plan_response = await generate_plan(
session=session,
user_name=user_name,
situation_type="proactive",
chat_stream=chat_stream,
available_actions=action_manager.get_using_actions(),
extra_context=extra_context,
)
# 检查是否决定不打扰
is_do_nothing = (
len(plan_response.actions) == 0 or
(len(plan_response.actions) == 1 and plan_response.actions[0].type == "do_nothing")
)
if is_do_nothing:
logger.info(f"决定不打扰: user={session.user_id}")
session.last_proactive_at = time.time()
await self.session_manager.save_session(session.user_id)
return
# 分离模式下需要注入上下文信息
if self._mode == KFCMode.SPLIT:
for action in plan_response.actions:
if action.type == "kfc_reply":
action.params["user_id"] = session.user_id
action.params["user_name"] = user_name
action.params["thought"] = plan_response.thought
action.params["situation_type"] = "proactive"
action.params["extra_context"] = extra_context
# 执行动作(回复生成在 Action.execute() 中完成)
for action in plan_response.actions:
await action_manager.execute_action(
action_name=action.type,
chat_id=session.stream_id,
target_message=None,
reasoning=plan_response.thought,
action_data=action.params,
thinking_id=None,
log_prefix="[KFC ProactiveThinker]",
)
# 记录到 mental_log
session.add_bot_planning(
thought=plan_response.thought,
actions=[a.to_dict() for a in plan_response.actions],
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
# 更新状态
session.last_proactive_at = time.time()
if plan_response.max_wait_seconds > 0:
session.start_waiting(
expected_reaction=plan_response.expected_reaction,
max_wait_seconds=plan_response.max_wait_seconds,
)
# 保存
await self.session_manager.save_session(session.user_id)
logger.info(
f"[ProactiveThinker] 主动发起完成: user={session.user_id}, "
f"actions={[a.type for a in plan_response.actions]}"
)
except Exception as e:
logger.error(f"[ProactiveThinker] 主动思考失败: {e}")
async def _get_chat_stream(self, stream_id: str):
"""获取聊天流"""
try:
from src.chat.message_receive.chat_stream import get_chat_manager
chat_manager = get_chat_manager()
if chat_manager:
return await chat_manager.get_stream(stream_id)
except Exception as e:
logger.warning(f"[ProactiveThinker] 获取 chat_stream 失败: {e}")
return None
async def _get_user_name(self, user_id: str, stream_id: str) -> str:
"""获取用户名称(优先从 person_info 获取)"""
try:
from src.person_info.person_info import get_person_info_manager
person_info_manager = get_person_info_manager()
platform = global_config.bot.platform if global_config else "qq"
person_id = person_info_manager.get_person_id(platform, user_id)
person_name = await person_info_manager.get_value(person_id, "person_name")
if person_name:
return person_name
except Exception as e:
logger.debug(f"[ProactiveThinker] 获取用户名失败: {e}")
# 回退到 user_id
return user_id
def _format_duration(self, seconds: float | None) -> str:
"""格式化时间间隔为人类可读的字符串"""
if seconds is None or seconds < 0:
return "未知"
if seconds < 60:
return f"{int(seconds)}"
elif seconds < 3600:
minutes = seconds / 60
return f"{minutes:.0f} 分钟"
elif seconds < 86400:
hours = seconds / 3600
return f"{hours:.1f} 小时"
else:
days = seconds / 86400
return f"{days:.1f}"
def get_stats(self) -> dict:
"""获取统计信息"""
return {
**self._stats,
"is_running": self._running,
}
# 全局单例
_proactive_thinker: Optional[ProactiveThinker] = None
def get_proactive_thinker() -> ProactiveThinker:
"""获取全局主动思考器"""
global _proactive_thinker
if _proactive_thinker is None:
_proactive_thinker = ProactiveThinker()
return _proactive_thinker
async def start_proactive_thinker() -> ProactiveThinker:
"""启动主动思考器"""
thinker = get_proactive_thinker()
await thinker.start()
return thinker
async def stop_proactive_thinker() -> None:
"""停止主动思考器"""
global _proactive_thinker
if _proactive_thinker:
await _proactive_thinker.stop()

View File

@@ -0,0 +1,16 @@
"""
Kokoro Flow Chatter V2 - 提示词模块
使用项目统一的 Prompt 管理系统管理所有提示词模板
"""
# 导入 prompts 模块以注册提示词
from . import prompts # noqa: F401
from .builder import PromptBuilder, get_prompt_builder
from .prompts import PROMPT_NAMES
__all__ = [
"PromptBuilder",
"get_prompt_builder",
"PROMPT_NAMES",
]

File diff suppressed because it is too large

View File

@@ -0,0 +1,421 @@
"""
Kokoro Flow Chatter - 提示词模板注册
使用项目统一的 Prompt 管理系统注册所有 KFC V2 使用的提示词模板
"""
from src.chat.utils.prompt import Prompt
# =================================================================================================
# KFC V2 主提示词模板
# =================================================================================================
kfc_MAIN_PROMPT = Prompt(
name="kfc_main",
template="""# 你与 {user_name} 的私聊
## 人设
{persona_block}
## 你与 {user_name} 的关系
{relation_block}
## 相关记忆
{memory_block}
## 你们之间最近的活动记录
以下是你和 {user_name} 最近的互动历史,按时间顺序记录了你们的对话和你的心理活动:
{activity_stream}
## 当前情况
{current_situation}
## 聊天历史总览
以下是你和 {user_name} 的聊天记录,帮助你更好地理解对话上下文:
{chat_history_block}
## 你可以做的事情
{available_actions}
## 你的表达习惯
{expression_habits}
## 你的回复格式
{output_format}
""",
)
# =================================================================================================
# 输出格式模板
# =================================================================================================
kfc_OUTPUT_FORMAT = Prompt(
name="kfc_output_format",
template="""请用以下 JSON 格式回复:
```json
{{
"thought": "你脑子里在想什么,越自然越好",
"actions": [
{{"type": "动作名称", ...动作参数}}
],
"expected_reaction": "你期待对方的反应是什么",
"max_wait_seconds": 等待时间0 表示不等待
}}
```
### 字段说明
- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
- `actions`:你要执行的动作列表。每个动作是一个对象,必须包含 `type` 字段指定动作类型,其他字段根据动作类型不同而不同(参考上面每个动作的示例)。
- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
- `max_wait_seconds`:设定等待时间,0 表示不等待,超时后你会考虑是否要主动说点什么。如果你认为聊天没有继续的必要,或不想打扰对方,可以设为 0。
### 注意事项
- 动作参数直接写在动作对象里,不需要 `action_data` 包装
- 即使什么都不想做,也放一个 `{{"type": "do_nothing"}}`
- 可以组合多个动作,比如先发消息再发表情""",
)
# =================================================================================================
# 情景模板 - 根据不同情境使用不同的当前情况描述
# =================================================================================================
kfc_SITUATION_NEW_MESSAGE = Prompt(
name="kfc_situation_new_message",
template="""现在是 {current_time}
{user_name} 刚刚给你发了消息。这是一次新的对话发起(不是对你之前消息的回复)。
请决定你要怎么回应。你可以:
- 发送文字消息回复
- 执行其他动作
- 什么都不做(如果觉得没必要回复)
- 或者组合多个动作""",
)
kfc_SITUATION_REPLY_IN_TIME = Prompt(
name="kfc_situation_reply_in_time",
template="""现在是 {current_time}
你之前发了消息后一直在等 {user_name} 的回复。
等了大约 {elapsed_minutes:.1f} 分钟(你原本打算最多等 {max_wait_minutes:.1f} 分钟)。
现在 {user_name} 回复了!
请决定你接下来要怎么回应。""",
)
kfc_SITUATION_REPLY_LATE = Prompt(
name="kfc_situation_reply_late",
template="""现在是 {current_time}
你之前发了消息后在等 {user_name} 的回复。
你原本打算最多等 {max_wait_minutes:.1f} 分钟,但实际等了 {elapsed_minutes:.1f} 分钟才收到回复。
虽然有点迟,但 {user_name} 终于回复了。
请决定你接下来要怎么回应。(可以选择轻轻抱怨一下迟到,也可以装作没在意)""",
)
kfc_SITUATION_TIMEOUT = Prompt(
name="kfc_situation_timeout",
template="""现在是 {current_time}
你之前发了消息后一直在等 {user_name} 的回复。
你原本打算最多等 {max_wait_minutes:.1f} 分钟,现在已经等了 {elapsed_minutes:.1f} 分钟了,对方还是没回。
你当时期待的反应是:"{expected_reaction}"
{timeout_context}
你需要决定:
1. 继续等待(设置新的 max_wait_seconds
2. 主动说点什么打破沉默
3. 做点别的事情(执行其他动作)
4. 算了不等了max_wait_seconds = 0
【注意】如果已经连续多次超时,对方可能暂时不方便回复。频繁主动发消息可能会打扰到对方。
考虑是否应该暂时放下期待,让对方有空间。""",
)
kfc_SITUATION_PROACTIVE = Prompt(
name="kfc_situation_proactive",
template="""现在是 {current_time}
你和 {user_name} 已经有一段时间没聊天了(沉默了 {silence_duration})。
{trigger_reason}
你在想要不要主动找 {user_name} 聊点什么。
请决定:
1. 主动发起对话(想个话题开场)
2. 做点动作试探一下
3. 算了现在不是好时机do_nothing
如果决定发起对话,想想用什么自然的方式开场,不要太突兀。""",
)
# =================================================================================================
# 活动流条目模板 - 用于构建 activity_stream
# =================================================================================================
# 用户消息条目
kfc_ENTRY_USER_MESSAGE = Prompt(
name="kfc_entry_user_message",
template="""{time}{user_name} 说:
"{content}"
""",
)
# Bot 规划条目(有等待)
kfc_ENTRY_BOT_PLANNING = Prompt(
name="kfc_entry_bot_planning",
template="""【你的想法】
内心:{thought}
行动:{actions_description}
期待:{expected_reaction}
决定等待:最多 {max_wait_minutes:.1f} 分钟
""",
)
# Bot 规划条目(无等待)
kfc_ENTRY_BOT_PLANNING_NO_WAIT = Prompt(
name="kfc_entry_bot_planning_no_wait",
template="""【你的想法】
内心:{thought}
行动:{actions_description}
(不打算等对方回复)
""",
)
# 等待期间心理变化
kfc_ENTRY_WAITING_UPDATE = Prompt(
name="kfc_entry_waiting_update",
template="""【等待中... {elapsed_minutes:.1f} 分钟过去了】
你想:{waiting_thought}
""",
)
# 收到及时回复时的标注
kfc_ENTRY_REPLY_IN_TIME = Prompt(
name="kfc_entry_reply_in_time",
template="""→ (对方在你预期时间内回复了,等了 {elapsed_minutes:.1f} 分钟)
""",
)
# 收到迟到回复时的标注
kfc_ENTRY_REPLY_LATE = Prompt(
name="kfc_entry_reply_late",
template="""→ (对方回复迟了,你原本只打算等 {max_wait_minutes:.1f} 分钟,实际等了 {elapsed_minutes:.1f} 分钟)
""",
)
# 主动思考触发
kfc_ENTRY_PROACTIVE_TRIGGER = Prompt(
name="kfc_entry_proactive_trigger",
template="""【沉默了 {silence_duration}
你开始考虑要不要主动找对方聊点什么...
""",
)
# =================================================================================================
# Planner 专用输出格式
# =================================================================================================
kfc_PLANNER_OUTPUT_FORMAT = Prompt(
name="kfc_planner_output_format",
template="""请用以下 JSON 格式回复:
```json
{{
"thought": "你脑子里在想什么,越自然越好",
"actions": [
{{"type": "动作名称", ...动作参数}}
],
"expected_reaction": "你期待对方的反应是什么",
"max_wait_seconds": 等待时间0 表示不等待
}}
```
### 字段说明
- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
- `actions`:你要执行的动作列表。每个动作是一个对象,必须包含 `type` 字段指定动作类型,其他字段根据动作类型不同而不同(参考上面每个动作的示例)。
- 对于 `kfc_reply` 动作,只需要指定 `{{"type": "kfc_reply"}}`,不需要填写 `content` 字段(回复内容会单独生成)
- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
- `max_wait_seconds`:设定等待时间,0 表示不等待,超时后你会考虑是否要主动说点什么
### 注意事项
- 动作参数直接写在动作对象里,不需要 `action_data` 包装
- 即使什么都不想做,也放一个 `{{"type": "do_nothing"}}`
- 可以组合多个动作,比如先发消息再发表情""",
)
# =================================================================================================
# Replyer 专用提示词模板
# =================================================================================================
kfc_REPLYER_PROMPT = Prompt(
name="kfc_replyer",
template="""# 你与 {user_name} 的私聊
## 人设
{persona_block}
## 你与 {user_name} 的关系
{relation_block}
## 相关记忆
{memory_block}
## 你们之间发生的事(活动流)
以下是你和 {user_name} 最近的互动历史,按时间顺序记录了你们的对话和你的心理活动:
{activity_stream}
## 当前情况
{current_situation}
## 聊天历史总览
以下是你和 {user_name} 的聊天记录,帮助你更好地理解对话上下文:
{chat_history_block}
## 你的表达习惯
{expression_habits}
## 你的决策
你已经决定要回复 {user_name}
你需要生成一段与当前情境和历史消息紧密相关的回复。
**你的想法**{thought}
{reply_context}
## 要求
- 请注意不要输出多余内容(包括前后缀、冒号和引号、at、[xxxxx] 等系统格式化文字或 @ 等)。只输出回复内容。
- 在称呼用户时,请使用更自然的昵称或简称。对于长英文名,可使用首字母缩写;对于中文名,可提炼合适的简称。禁止直接复述复杂的用户名或输出用户名中的任何符号,让称呼更像人类习惯,注意,简称不是必须的,合理的使用。
你的回复应该是一条简短、完整且口语化的回复。
现在,你说:""",
)
kfc_REPLYER_CONTEXT_NORMAL = Prompt(
name="kfc_replyer_context_normal",
template="""你要回复的是 {user_name} 刚发来的消息:
{target_message}""",
)
kfc_REPLYER_CONTEXT_IN_TIME = Prompt(
name="kfc_replyer_context_in_time",
template="""你等了 {elapsed_minutes:.1f} 分钟(原本打算最多等 {max_wait_minutes:.1f} 分钟),{user_name} 终于回复了:
{target_message}
你可以表现出一点"等到了回复"的欣喜或轻松。""",
)
kfc_REPLYER_CONTEXT_LATE = Prompt(
name="kfc_replyer_context_late",
template="""你等了 {elapsed_minutes:.1f} 分钟(原本只打算等 {max_wait_minutes:.1f} 分钟),{user_name} 才回复:
{target_message}
虽然有点晚,但对方终于回复了。你可以选择轻轻抱怨一下,也可以装作没在意。""",
)
kfc_REPLYER_CONTEXT_PROACTIVE = Prompt(
name="kfc_replyer_context_proactive",
template="""你们已经有一段时间({silence_duration})没聊天了。{trigger_reason}
你决定主动打破沉默,找 {user_name} 聊点什么。想一个自然的开场白,不要太突兀。""",
)
# =================================================================================================
# 等待思考提示词模板(用于生成等待中的心理活动)
# =================================================================================================
kfc_WAITING_THOUGHT = Prompt(
name="kfc_waiting_thought",
template="""# 等待中的心理活动
## 你是谁
{persona_block}
## 你与 {user_name} 的关系
{relation_block}
## 当前情况
你刚才给 {user_name} 发了消息,现在正在等待对方回复。
**你发的消息**{last_bot_message}
**你期待的反应**{expected_reaction}
**已等待时间**{elapsed_minutes:.1f} 分钟
**计划最多等待**{max_wait_minutes:.1f} 分钟
**等待进度**{progress_percent}%
## 任务
请描述你此刻等待时的内心想法。这是你私下的心理活动,不是要发送的消息。
**要求**
- 用第一人称描述你的感受和想法
- 要符合你的性格和你们的关系
- 根据等待进度自然表达情绪变化:
- 初期0-40%):可能比较平静,稍微期待
- 中期40-70%):可能开始有点在意,但还好
- 后期70-100%):可能有点焦虑、担心,或者想主动做点什么
- 不要太长1-2句话即可
- 不要输出 JSON直接输出你的想法
现在,请直接输出你等待时的内心想法:""",
)
# =================================================================================================
# 统一模式输出格式(单次 LLM 调用,要求填写 content
# =================================================================================================
kfc_UNIFIED_OUTPUT_FORMAT = Prompt(
name="kfc_unified_output_format",
template="""请用以下 JSON 格式回复:
```json
{{
"thought": "你脑子里在想什么,越自然越好",
"actions": [
{{"type": "kfc_reply", "content": "你的回复内容"}}
],
"expected_reaction": "你期待对方的反应是什么",
"max_wait_seconds": 等待时间0 表示不等待
}}
```
### 字段说明
- `thought`:你的内心独白,记录你此刻的想法和感受。要自然,不要技术性语言。
- `actions`:你要执行的动作列表。对于 `kfc_reply` 动作,**必须**填写 `content` 字段,写上你要说的话。
- `expected_reaction`:你期待对方如何回应(用于判断是否需要等待)
- `max_wait_seconds`:设定等待时间,0 表示不等待,超时后你会考虑是否要主动说点什么。如果你认为聊天没有继续的必要,或不想打扰对方,可以设为 0。
### 注意事项
- kfc_reply 的 content 字段是**必填**的,直接写你要发送的消息内容
- 即使什么都不想做,也放一个 `{{"type": "do_nothing"}}`
- 可以组合多个动作,比如先发消息再发表情""",
)
# 导出所有模板名称,方便外部引用
PROMPT_NAMES = {
"main": "kfc_main",
"output_format": "kfc_output_format",
"planner_output_format": "kfc_planner_output_format",
"unified_output_format": "kfc_unified_output_format",
"replyer": "kfc_replyer",
"replyer_context_normal": "kfc_replyer_context_normal",
"replyer_context_in_time": "kfc_replyer_context_in_time",
"replyer_context_late": "kfc_replyer_context_late",
"replyer_context_proactive": "kfc_replyer_context_proactive",
"waiting_thought": "kfc_waiting_thought",
"situation_new_message": "kfc_situation_new_message",
"situation_reply_in_time": "kfc_situation_reply_in_time",
"situation_reply_late": "kfc_situation_reply_late",
"situation_timeout": "kfc_situation_timeout",
"situation_proactive": "kfc_situation_proactive",
"entry_user_message": "kfc_entry_user_message",
"entry_bot_planning": "kfc_entry_bot_planning",
"entry_bot_planning_no_wait": "kfc_entry_bot_planning_no_wait",
"entry_waiting_update": "kfc_entry_waiting_update",
"entry_reply_in_time": "kfc_entry_reply_in_time",
"entry_reply_late": "kfc_entry_reply_late",
"entry_proactive_trigger": "kfc_entry_proactive_trigger",
}
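
下面补充一个极简的渲染示意(非本 PR 的代码,仅帮助理解这些模板的占位符;假设可以直接访问 Prompt 对象的 template 字段,实际项目中通常通过提示词管理器按名称取用):

```python
# 假设性示例:直接用 str.format 填充 kfc_replyer_context_late 模板
rendered = kfc_REPLYER_CONTEXT_LATE.template.format(
    elapsed_minutes=12.5,        # 实际等了 12.5 分钟
    max_wait_minutes=5.0,        # 原本只打算等 5 分钟
    user_name="小明",
    target_message="抱歉,刚才在开会",
)
print(rendered)
```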

View File

@@ -0,0 +1,588 @@
"""
Kokoro Flow Chatter - 统一模式提示词模块
为统一模式(Unified Mode)提供模块化的提示词构建:
1. 核心身份模块 - 人设/人格/背景
2. 行为准则模块 - 规则/边界
3. 情境上下文模块 - 时间/场景/内在状态/关系/记忆
4. 动作能力模块 - 可用动作的描述
5. 输出格式模块 - 表达风格 + JSON格式
设计理念:
- 每个模块只负责自己的部分,互不干扰
- 回复相关内容(人设、上下文)与动作定义分离
- 方便独立调试和优化每个部分
注意:此模块仅用于统一模式,分离模式使用 prompt/builder.py
"""
import time
from datetime import datetime
from typing import TYPE_CHECKING, Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.base.component_types import ActionInfo
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
from src.common.data_models.message_manager_data_model import StreamContext
from .models import MentalLogEntry, EventType
from .session import KokoroSession
logger = get_logger("kfc_prompt_unified")
# ============================================================
# 模块1: 核心身份 - 人设/人格/背景
# ============================================================
def build_identity_module() -> str:
"""
构建核心身份模块
包含:人格核心、人格细节、外貌身份、世界观背景
这些是回复内容的基础,决定了"你是谁"
"""
if global_config is None:
raise RuntimeError("global_config 未初始化")
personality_cfg = global_config.personality
personality_core = personality_cfg.personality_core or "你是一个温暖、真诚的对话伙伴。"
personality_side = personality_cfg.personality_side or ""
identity = personality_cfg.identity or ""
background_story = personality_cfg.background_story or ""
parts = [personality_core]
if personality_side:
parts.append(f"\n{personality_side}")
if identity:
parts.append(f"\n{identity}")
if background_story:
parts.append(f"\n### 背景故事\n{background_story}")
return "\n".join(parts)
# ============================================================
# 模块2: 行为准则 - 规则/边界/KFC特有准则
# ============================================================
def build_rules_module() -> str:
"""
构建行为准则模块
包含:安全规则 + KFC 通用准则
这些是行为的边界,决定了"什么能做什么不能做"
"""
if global_config is None:
raise RuntimeError("global_config 未初始化")
safety_guidelines = global_config.personality.safety_guidelines or []
if isinstance(safety_guidelines, list):
safety_guidelines_str = "\n".join(f"- {rule}" for rule in safety_guidelines)
else:
safety_guidelines_str = str(safety_guidelines)
# KFC 通用准则
kfc_rules = """- 说话要自然,像平时发微信一样
- 有自己的边界感,不用无条件迎合
- 称呼对方用自然的昵称,别念复杂的用户名
- 不要模仿聊天记录里的系统格式(比如"[表情包xxx]"这种是系统标记),要发送表情包请使用 emoji 动作"""
return f"""{safety_guidelines_str}
{kfc_rules}"""
# ============================================================
# 模块3: 情境上下文 - 时间/场景/内在状态/关系/记忆
# ============================================================
def build_context_module(
session: KokoroSession,
chat_stream: Optional["ChatStream"] = None,
context_data: Optional[dict[str, str]] = None,
) -> str:
"""
构建情境上下文模块
包含:当前时间、聊天场景、内在状态、关系信息、记忆
这些是回复的上下文,决定了"当前在什么情况下"
Args:
session: 当前会话
chat_stream: 聊天流(判断群聊/私聊)
context_data: S4U 上下文数据
"""
context_data = context_data or {}
# 时间和场景
current_time = datetime.now().strftime("%Y年%m月%d日 %H:%M:%S")
is_group_chat = bool(chat_stream and chat_stream.group_info)
chat_scene = "你在群里聊天" if is_group_chat else "你在和对方私聊"
# 日程(如果有)
schedule_block = context_data.get("schedule", "")
# 内在状态(从 context_data 获取,如果没有则使用默认值)
mood = context_data.get("mood", "平静")
# 关系信息
relation_info = context_data.get("relation_info", "")
# 记忆
memory_block = context_data.get("memory_block", "")
parts = []
# 时间和场景
parts.append(f"**时间**: {current_time}")
parts.append(f"**场景**: {chat_scene}")
# 日程块
if schedule_block:
parts.append(f"{schedule_block}")
# 内在状态
parts.append(f"\n你现在的心情:{mood}")
# 关系信息
if relation_info:
parts.append(f"\n## 4. 你和对方的关系\n{relation_info}")
# 记忆
if memory_block:
parts.append(f"\n{memory_block}")
return "\n".join(parts)
# ============================================================
# 模块4: 动作能力 - 可用动作的描述
# ============================================================
def build_actions_module(available_actions: Optional[dict[str, ActionInfo]] = None) -> str:
"""
构建动作能力模块
包含:所有可用动作的描述、参数、示例
这部分与回复内容分离,只描述"能做什么"
Args:
available_actions: 可用动作字典
"""
if not available_actions:
return _get_default_actions_block()
action_blocks = []
for action_name, action_info in available_actions.items():
description = action_info.description or f"执行 {action_name}"
# 构建动作块
action_block = f"### `{action_name}` - {description}"
# 参数说明(如果有)
if action_info.action_parameters:
params_lines = [f" - `{name}`: {desc}" for name, desc in action_info.action_parameters.items()]
action_block += f"\n参数:\n{chr(10).join(params_lines)}"
# 使用场景(如果有)
if action_info.action_require:
require_lines = [f" - {req}" for req in action_info.action_require]
action_block += f"\n使用场景:\n{chr(10).join(require_lines)}"
# 示例
example_params = ""
if action_info.action_parameters:
param_examples = [f'"{name}": "..."' for name in action_info.action_parameters.keys()]
example_params = ", " + ", ".join(param_examples)
action_block += f'\n```json\n{{"type": "{action_name}"{example_params}}}\n```'
action_blocks.append(action_block)
return "\n\n".join(action_blocks)
def _get_default_actions_block() -> str:
"""获取默认的内置动作描述块"""
return """### `kfc_reply` - 发消息
发送文字回复。
```json
{"type": "kfc_reply", "content": "你要说的话"}
```
### `poke_user` - 戳一戳
戳对方一下
```json
{"type": "poke_user"}
```
### `update_internal_state` - 更新你的心情
更新你现在的心情状态
```json
{"type": "update_internal_state", "mood": "开心"}
```
### `do_nothing` - 不做任何事
想了想,决定现在不作回应
```json
{"type": "do_nothing"}
```"""
# ============================================================
# 模块5: 表达与输出格式 - 回复风格 + JSON格式
# ============================================================
def build_output_module(context_data: Optional[dict[str, str]] = None) -> str:
"""
构建输出格式模块
包含表达风格、表达习惯、JSON 输出格式要求
这部分定义了"怎么说"和"输出什么格式"
Args:
context_data: S4U 上下文数据(包含 expression_habits)
"""
if global_config is None:
raise RuntimeError("global_config 未初始化")
context_data = context_data or {}
reply_style = global_config.personality.reply_style or ""
expression_habits = context_data.get("expression_habits", "")
# JSON 输出格式说明(更自然的思考引导)
json_format = """### 输出格式JSON
```json
{
"thought": "你心里的真实想法,像日记一样自然",
"expected_user_reaction": "猜猜对方看到会怎么想",
"max_wait_seconds": 等多久60-900不等就填0,
"actions": [
{"type": "kfc_reply", "content": "你要说的话"}
]
}
```
关于 thought(内心想法):
- 写你真正在想的,不是在分析任务
- 像心里嘀咕一样,比如"这家伙又来撒娇了~" "有点困了但还想再聊会儿"
- 不要写"根据设定""我需要""我应该"这种规划性的话
- 就是你作为这个人,此刻心里在想什么
关于 expected_user_reaction(预期反应):
- 随便猜一下对方可能的反应就行
- 比如"可能会撒娇""估计要睡了""应该会觉得好笑"
- 不用分析得很正式
不想做任何事就用 `{"type": "do_nothing"}`"""
parts = []
if reply_style:
parts.append(reply_style)
if expression_habits:
parts.append(expression_habits)
parts.append(json_format)
return "\n\n".join(parts)
# ============================================================
# 组装完整的 System Prompt复刻旧版
# ============================================================
def build_system_prompt(
session: KokoroSession,
available_actions: Optional[dict[str, ActionInfo]] = None,
context_data: Optional[dict[str, str]] = None,
chat_stream: Optional["ChatStream"] = None,
) -> str:
"""
组装完整的系统提示词
按模块顺序组装:
1. 角色框架
2. 核心身份(人设)
3. 行为准则(规则)
4. 情境上下文(时间/状态/关系)
5. 动作能力(可用动作)
6. 输出格式(表达风格 + JSON)
Args:
session: 当前会话
available_actions: 可用动作字典
context_data: S4U 上下文数据
chat_stream: 聊天流
Returns:
完整的系统提示词
"""
# 角色框架:不提AI,直接进入角色
role_frame = "你是一个真实的人,有自己的生活、想法和情绪。像平时和朋友聊天一样自然地交流。"
# 组装各模块
modules = [
role_frame,
"",
"## 1. 你是谁",
build_identity_module(),
"",
"## 2. 规则",
build_rules_module(),
"",
"## 3. 现在的情况",
build_context_module(session, chat_stream, context_data),
"",
"## 5. 你能做的事",
build_actions_module(available_actions),
"",
"## 6. 怎么回复",
build_output_module(context_data),
]
return "\n".join(modules)
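
作为参考,下面是一个最小的组装示意(非本 PR 代码,仅用于说明调用方式;假设 global_config 等运行环境已初始化,且已按项目内路径导入 KokoroSession):

```python
# 假设性用法:用最小的会话对象和手填的 context_data 组装系统提示词
session = KokoroSession(user_id="10001", stream_id="qq:10001:private")

system_prompt = build_system_prompt(
    session=session,
    available_actions=None,            # 为空时使用 _get_default_actions_block 的内置动作描述
    context_data={
        "mood": "有点困",
        "relation_info": "认识半年的朋友,聊得还算投机",
        "expression_habits": "- 喜欢用简短的句子",
    },
    chat_stream=None,                  # 没有 chat_stream 时按私聊场景处理
)
print(system_prompt)
```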
# ============================================================
# User Prompt 模板(复刻旧版)
# ============================================================
RESPONDING_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}
## 新消息
{incoming_messages}
---
看完这些消息,你想怎么回应?用 JSON 输出你的想法和决策。"""
TIMEOUT_DECISION_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}
## 现在的情况
你发了消息,等了 {wait_duration_seconds:.0f} 秒({wait_duration_minutes:.1f} 分钟),对方还没回。
你之前觉得对方可能会:{expected_user_reaction}
{followup_warning}
你发的最后一条:{last_bot_message}
---
你拿起手机看了一眼,发现对方还没回复。你想怎么办?
选项:
1. **继续等** - 用 `do_nothing`,设个 `max_wait_seconds` 等一会儿再看
2. **发消息** - 用 `reply`,不过别太频繁追问
3. **算了不等了** - 用 `do_nothing`,`max_wait_seconds` 设为 0
用 JSON 输出你的想法和决策。"""
PROACTIVE_THINKING_USER_PROMPT_TEMPLATE = """## 聊天记录
{narrative_history}
## 现在的情况
现在是 {current_time},距离你们上次聊天已经过了 {silence_duration}
{relation_block}
{trigger_context}
---
你突然想起了对方。要不要联系一下?
说实话,不联系也完全没问题——不打扰也是一种温柔。
如果决定联系,想好说什么,要自然一点。
用 JSON 输出你的想法和决策。不想发消息就用 `do_nothing`。"""
# ============================================================
# 格式化历史记录
# ============================================================
def format_narrative_history(
mental_log: list[MentalLogEntry],
max_entries: int = 15,
) -> str:
"""
将心理活动日志格式化为叙事历史
Args:
mental_log: 心理活动日志列表
max_entries: 最大条目数
Returns:
str: 格式化的叙事历史文本
"""
if not mental_log:
return "(这是对话的开始,还没有历史记录)"
# 获取最近的日志条目
recent_entries = mental_log[-max_entries:]
narrative_parts = []
for entry in recent_entries:
timestamp_str = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(entry.timestamp)
)
if entry.event_type == EventType.USER_MESSAGE:
user_name = entry.user_name or "用户"
narrative_parts.append(
f"[{timestamp_str}] {user_name}说:{entry.content}"
)
elif entry.event_type == EventType.BOT_PLANNING:
if entry.thought:
narrative_parts.append(
f"[{timestamp_str}] (你的内心:{entry.thought}"
)
# 格式化动作
for action in entry.actions:
action_type = action.get("type", "")
if action_type == "kfc_reply" or action_type == "reply":
content = action.get("content", "")
if content:
narrative_parts.append(
f"[{timestamp_str}] 你回复:{content}"
)
elif entry.event_type == EventType.WAITING_UPDATE:
if entry.waiting_thought:
narrative_parts.append(
f"[{timestamp_str}] (等待中的想法:{entry.waiting_thought}"
)
return "\n".join(narrative_parts) if narrative_parts else "(这是对话的开始,还没有历史记录)"
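
下面是一个小的渲染示意(非本 PR 代码;MentalLogEntry 的构造方式参照 session.py 中 add_user_message / add_bot_planning 的用法,字段以 models.py 的实际定义为准):

```python
# 假设性示例:两条心理日志渲染出的叙事历史
from .models import EventType, MentalLogEntry  # 项目内相对导入,仅作示意

demo_log = [
    MentalLogEntry(
        event_type=EventType.USER_MESSAGE,
        timestamp=1733040000.0,
        content="今天好累啊",
        user_name="小明",
        user_id="10001",
    ),
    MentalLogEntry(
        event_type=EventType.BOT_PLANNING,
        timestamp=1733040060.0,
        thought="他状态不太好,先关心一下",
        actions=[{"type": "kfc_reply", "content": "怎么啦,今天工作很多吗?"}],
        expected_reaction="他可能会吐槽工作",
        max_wait_seconds=300,
    ),
]
print(format_narrative_history(demo_log))
# 输出大致是三行:"小明说:……"、"(你的内心:……)"、"你回复:……",各带时间戳
```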
def format_history_from_context(
context: "StreamContext",
mental_log: Optional[list[MentalLogEntry]] = None,
) -> str:
"""
从 StreamContext 的历史消息构建叙事历史
这是实现"无缝融入"的关键:
- 从同一个数据库读取历史消息(与AFC共享)
- 遵循全局配置 [chat].max_context_size
- 将消息串渲染成KFC的叙事体格式
Args:
context: 聊天流上下文,包含共享的历史消息
mental_log: 可选的心理活动日志,用于补充内心独白
Returns:
str: 格式化的叙事历史文本
"""
# 从 StreamContext 获取历史消息,遵循全局上下文长度配置
max_context = 25 # 默认值
if global_config and hasattr(global_config, 'chat') and global_config.chat:
max_context = getattr(global_config.chat, "max_context_size", 25)
history_messages = context.get_messages(limit=max_context, include_unread=False)
if not history_messages and not mental_log:
return "(这是对话的开始,还没有历史记录)"
# 获取 Bot 的用户ID(用于判断消息来源)
bot_user_id = None
if global_config and hasattr(global_config, 'bot') and global_config.bot:
bot_user_id = str(getattr(global_config.bot, 'qq_account', ''))
narrative_parts = []
# 首先,将数据库历史消息转换为叙事格式
for msg in history_messages:
timestamp_str = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(msg.time or time.time())
)
# 判断是用户消息还是Bot消息
msg_user_id = str(msg.user_info.user_id) if msg.user_info else ""
is_bot_message = bot_user_id and msg_user_id == bot_user_id
content = msg.processed_plain_text or msg.display_message or ""
if is_bot_message:
narrative_parts.append(f"[{timestamp_str}] 你回复:{content}")
else:
sender_name = msg.user_info.user_nickname if msg.user_info else "用户"
narrative_parts.append(f"[{timestamp_str}] {sender_name}说:{content}")
# 然后,补充 mental_log 中的内心独白(如果有)
if mental_log:
for entry in mental_log[-5:]: # 只取最近5条心理活动
timestamp_str = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(entry.timestamp)
)
if entry.event_type == EventType.BOT_PLANNING and entry.thought:
narrative_parts.append(f"[{timestamp_str}] (你的内心:{entry.thought}")
return "\n".join(narrative_parts) if narrative_parts else "(这是对话的开始,还没有历史记录)"
def format_incoming_messages(
message_content: str,
sender_name: str,
sender_id: str,
message_time: Optional[float] = None,
all_unread_messages: Optional[list] = None,
) -> str:
"""
格式化收到的消息
支持单条消息(兼容旧调用)和多条消息(打断合并场景)
Args:
message_content: 主消息内容
sender_name: 发送者名称
sender_id: 发送者ID
message_time: 消息时间戳
all_unread_messages: 所有未读消息列表
Returns:
str: 格式化的消息文本
"""
if message_time is None:
message_time = time.time()
# 如果有多条消息,格式化为消息组
if all_unread_messages and len(all_unread_messages) > 1:
lines = [f"**用户连续发送了 {len(all_unread_messages)} 条消息:**\n"]
for i, msg in enumerate(all_unread_messages, 1):
msg_time = msg.time or time.time()
msg_time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(msg_time))
msg_sender = msg.user_info.user_nickname if msg.user_info else sender_name
msg_content = msg.processed_plain_text or msg.display_message or ""
lines.append(f"[{i}] 来自:{msg_sender}")
lines.append(f" 时间:{msg_time_str}")
lines.append(f" 内容:{msg_content}")
lines.append("")
lines.append("**提示**:请综合理解这些消息的整体意图,不需要逐条回复。")
return "\n".join(lines)
# 单条消息(兼容旧格式)
message_time_str = time.strftime(
"%Y-%m-%d %H:%M:%S",
time.localtime(message_time)
)
return f"""来自:{sender_name}用户ID: {sender_id}
时间:{message_time_str}
内容:{message_content}"""
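
最后给出单条消息格式化的调用示意(非本 PR 代码;多条未读消息的合并场景需要项目内的消息对象,这里不展开):

```python
# 假设性示例:格式化一条刚收到的消息
incoming = format_incoming_messages(
    message_content="今晚一起吃饭吗?",
    sender_name="小明",
    sender_id="10001",
    message_time=None,    # 为空时取当前时间
)
print(incoming)           # 输出"来自 / 时间 / 内容"三行
```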

View File

@@ -0,0 +1,124 @@
"""
Kokoro Flow Chatter - Replyer
纯粹的回复生成器:
- 接收 planner 的决策(thought 等)
- 专门负责将回复意图转化为自然的对话文本
- 不输出 JSON,直接生成可发送的消息文本
"""
from typing import TYPE_CHECKING, Optional
from src.common.logger import get_logger
from src.plugin_system.apis import llm_api
from .prompt.builder import get_prompt_builder
from .session import KokoroSession
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
logger = get_logger("kfc_replyer")
async def generate_reply_text(
session: KokoroSession,
user_name: str,
thought: str,
situation_type: str = "new_message",
chat_stream: Optional["ChatStream"] = None,
extra_context: Optional[dict] = None,
) -> tuple[bool, str]:
"""
生成回复文本
Args:
session: 会话对象
user_name: 用户名称
thought: 规划器生成的想法(内心独白)
situation_type: 情况类型
chat_stream: 聊天流对象
extra_context: 额外上下文
Returns:
(success, reply_text) 元组
- success: 是否成功生成
- reply_text: 生成的回复文本
"""
try:
# 1. 构建回复器提示词
prompt_builder = get_prompt_builder()
prompt = await prompt_builder.build_replyer_prompt(
session=session,
user_name=user_name,
thought=thought,
situation_type=situation_type,
chat_stream=chat_stream,
extra_context=extra_context,
)
from src.config.config import global_config
if global_config and global_config.debug.show_prompt:
logger.info(f"[KFC Replyer] 生成的回复提示词:\n{prompt}")
# 2. 获取 replyer 模型配置并调用 LLM
models = llm_api.get_available_models()
replyer_config = models.get("replyer")
if not replyer_config:
logger.error("[KFC Replyer] 未找到 replyer 模型配置")
return False, "(回复生成失败:未找到模型配置)"
success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
prompt=prompt,
model_config=replyer_config,
request_type="kokoro_flow_chatter.reply",
)
if not success:
logger.error(f"[KFC Replyer] LLM 调用失败: {raw_response}")
return False, "(回复生成失败)"
# 3. 清理并返回回复文本
reply_text = _clean_reply_text(raw_response)
# 使用 logger 输出美化日志(颜色通过 logger 系统配置)
logger.info(f"💬 {reply_text}")
return True, reply_text
except Exception as e:
logger.error(f"[KFC Replyer] 生成失败: {e}")
import traceback
traceback.print_exc()
return False, "(回复生成失败)"
def _clean_reply_text(raw_text: str) -> str:
"""
清理回复文本
移除可能的前后缀、引号、markdown 标记等
"""
text = raw_text.strip()
# 移除可能的 markdown 代码块标记
if text.startswith("```") and text.endswith("```"):
lines = text.split("\n")
if len(lines) >= 3:
# 移除首尾的 ``` 行
text = "\n".join(lines[1:-1]).strip()
# 移除首尾的引号(如果整个文本被引号包裹)
if (text.startswith('"') and text.endswith('"')) or \
(text.startswith("'") and text.endswith("'")):
text = text[1:-1].strip()
# 移除可能的"你说:"、"回复:"等前缀
prefixes_to_remove = ["你说:", "你说:", "回复:", "回复:", "我说:", "我说:"]
for prefix in prefixes_to_remove:
if text.startswith(prefix):
text = text[len(prefix):].strip()
break
return text
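
作为参考,下面是一个假设性的调用示意(非本 PR 代码;需要完整的机器人运行环境与 replyer 模型配置才能真正执行):

```python
# 假设性示例:根据规划器给出的想法生成一条可发送的回复文本
import asyncio

async def _demo():
    session = KokoroSession(user_id="10001", stream_id="qq:10001:private")
    ok, text = await generate_reply_text(
        session=session,
        user_name="小明",
        thought="他在等我确认时间,直接给个明确答复",
        situation_type="new_message",
    )
    if ok:
        print(text)

# asyncio.run(_demo())  # 仅示意,需在已初始化的运行环境中执行
```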

View File

@@ -0,0 +1,404 @@
"""
Kokoro Flow Chatter - 会话管理
极简的会话状态管理:
- Session 只有 IDLE 和 WAITING 两种状态
- 包含 mental_log(心理活动历史)
- 包含 waiting_config(等待配置)
"""
import asyncio
import json
import os
import time
from pathlib import Path
from typing import Optional
from src.common.logger import get_logger
from .models import (
EventType,
MentalLogEntry,
SessionStatus,
WaitingConfig,
)
logger = get_logger("kfc_session")
class KokoroSession:
"""
Kokoro Flow Chatter 会话
为每个私聊用户维护一个独立的会话,包含:
- 基本信息(user_id, stream_id)
- 状态(只有 IDLE 和 WAITING)
- 心理活动历史(mental_log)
- 等待配置(waiting_config)
"""
# 心理活动日志最大保留条数
MAX_MENTAL_LOG_SIZE = 50
def __init__(
self,
user_id: str,
stream_id: str,
):
self.user_id = user_id
self.stream_id = stream_id
# 状态(只有 IDLE 和 WAITING)
self._status: SessionStatus = SessionStatus.IDLE
# 心理活动历史
self.mental_log: list[MentalLogEntry] = []
# 等待配置
self.waiting_config: WaitingConfig = WaitingConfig()
# 时间戳
self.created_at: float = time.time()
self.last_activity_at: float = time.time()
# 统计
self.total_interactions: int = 0
# 上次主动思考时间
self.last_proactive_at: Optional[float] = None
# 连续超时计数(用于避免过度打扰用户)
self.consecutive_timeout_count: int = 0
# 用户最后发消息的时间(用于计算距离用户上次回复的时间)
self.last_user_message_at: Optional[float] = None
@property
def status(self) -> SessionStatus:
return self._status
@status.setter
def status(self, value: SessionStatus) -> None:
old_status = self._status
self._status = value
if old_status != value:
logger.debug(f"Session {self.user_id} 状态变更: {old_status}{value}")
def add_entry(self, entry: MentalLogEntry) -> None:
"""添加心理活动日志条目"""
self.mental_log.append(entry)
self.last_activity_at = time.time()
# 保持日志在合理大小
if len(self.mental_log) > self.MAX_MENTAL_LOG_SIZE:
self.mental_log = self.mental_log[-self.MAX_MENTAL_LOG_SIZE:]
def add_user_message(
self,
content: str,
user_name: str,
user_id: str,
timestamp: Optional[float] = None,
) -> MentalLogEntry:
"""添加用户消息事件"""
msg_time = timestamp or time.time()
entry = MentalLogEntry(
event_type=EventType.USER_MESSAGE,
timestamp=msg_time,
content=content,
user_name=user_name,
user_id=user_id,
)
# 收到用户消息,重置连续超时计数
self.consecutive_timeout_count = 0
self.last_user_message_at = msg_time
# 如果之前在等待,记录收到回复的情况
if self.status == SessionStatus.WAITING and self.waiting_config.is_active():
elapsed = self.waiting_config.get_elapsed_seconds()
max_wait = self.waiting_config.max_wait_seconds
if elapsed <= max_wait:
entry.metadata["reply_status"] = "in_time"
entry.metadata["elapsed_seconds"] = elapsed
entry.metadata["max_wait_seconds"] = max_wait
else:
entry.metadata["reply_status"] = "late"
entry.metadata["elapsed_seconds"] = elapsed
entry.metadata["max_wait_seconds"] = max_wait
self.add_entry(entry)
return entry
def add_bot_planning(
self,
thought: str,
actions: list[dict],
expected_reaction: str = "",
max_wait_seconds: int = 0,
timestamp: Optional[float] = None,
) -> MentalLogEntry:
"""添加 Bot 规划事件"""
entry = MentalLogEntry(
event_type=EventType.BOT_PLANNING,
timestamp=timestamp or time.time(),
thought=thought,
actions=actions,
expected_reaction=expected_reaction,
max_wait_seconds=max_wait_seconds,
)
self.add_entry(entry)
self.total_interactions += 1
return entry
def add_waiting_update(
self,
waiting_thought: str,
mood: str = "",
timestamp: Optional[float] = None,
) -> MentalLogEntry:
"""添加等待期间的心理变化"""
entry = MentalLogEntry(
event_type=EventType.WAITING_UPDATE,
timestamp=timestamp or time.time(),
waiting_thought=waiting_thought,
mood=mood,
elapsed_seconds=self.waiting_config.get_elapsed_seconds(),
)
self.add_entry(entry)
return entry
def start_waiting(
self,
expected_reaction: str,
max_wait_seconds: int,
) -> None:
"""开始等待"""
if max_wait_seconds <= 0:
# 不等待,直接进入 IDLE
self.status = SessionStatus.IDLE
self.waiting_config.reset()
return
self.status = SessionStatus.WAITING
self.waiting_config = WaitingConfig(
expected_reaction=expected_reaction,
max_wait_seconds=max_wait_seconds,
started_at=time.time(),
last_thinking_at=0.0,
thinking_count=0,
)
logger.debug(
f"Session {self.user_id} 开始等待: "
f"max_wait={max_wait_seconds}s, expected={expected_reaction[:30]}..."
)
def end_waiting(self) -> None:
"""结束等待"""
self.status = SessionStatus.IDLE
self.waiting_config.reset()
def get_recent_entries(self, limit: int = 20) -> list[MentalLogEntry]:
"""获取最近的心理活动日志"""
return self.mental_log[-limit:] if self.mental_log else []
def get_last_bot_message(self) -> Optional[str]:
"""获取最后一条 Bot 发送的消息"""
for entry in reversed(self.mental_log):
if entry.event_type == EventType.BOT_PLANNING:
for action in entry.actions:
if action.get("type") in ("kfc_reply", "respond"):
return action.get("content", "")
return None
def to_dict(self) -> dict:
"""转换为字典(用于持久化)"""
return {
"user_id": self.user_id,
"stream_id": self.stream_id,
"status": str(self.status),
"mental_log": [e.to_dict() for e in self.mental_log],
"waiting_config": self.waiting_config.to_dict(),
"created_at": self.created_at,
"last_activity_at": self.last_activity_at,
"total_interactions": self.total_interactions,
"last_proactive_at": self.last_proactive_at,
"consecutive_timeout_count": self.consecutive_timeout_count,
"last_user_message_at": self.last_user_message_at,
}
@classmethod
def from_dict(cls, data: dict) -> "KokoroSession":
"""从字典创建会话"""
session = cls(
user_id=data.get("user_id", ""),
stream_id=data.get("stream_id", ""),
)
# 状态
status_str = data.get("status", "idle")
try:
session._status = SessionStatus(status_str)
except ValueError:
session._status = SessionStatus.IDLE
# 心理活动历史
mental_log_data = data.get("mental_log", [])
session.mental_log = [MentalLogEntry.from_dict(e) for e in mental_log_data]
# 等待配置
waiting_data = data.get("waiting_config", {})
session.waiting_config = WaitingConfig.from_dict(waiting_data)
# 时间戳
session.created_at = data.get("created_at", time.time())
session.last_activity_at = data.get("last_activity_at", time.time())
session.total_interactions = data.get("total_interactions", 0)
session.last_proactive_at = data.get("last_proactive_at")
# 连续超时相关
session.consecutive_timeout_count = data.get("consecutive_timeout_count", 0)
session.last_user_message_at = data.get("last_user_message_at")
return session
class SessionManager:
"""
会话管理器
负责会话的创建、获取、保存和清理
"""
_instance: Optional["SessionManager"] = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(
self,
data_dir: str = "data/kokoro_flow_chatter/sessions",
max_session_age_days: int = 30,
):
if hasattr(self, "_initialized") and self._initialized:
return
self._initialized = True
self.data_dir = Path(data_dir)
self.max_session_age_days = max_session_age_days
# 内存缓存
self._sessions: dict[str, KokoroSession] = {}
self._locks: dict[str, asyncio.Lock] = {}
# 确保数据目录存在
self.data_dir.mkdir(parents=True, exist_ok=True)
logger.info(f"SessionManager 初始化完成: {self.data_dir}")
def _get_lock(self, user_id: str) -> asyncio.Lock:
"""获取用户级别的锁"""
if user_id not in self._locks:
self._locks[user_id] = asyncio.Lock()
return self._locks[user_id]
def _get_file_path(self, user_id: str) -> Path:
"""获取会话文件路径"""
safe_id = "".join(c if c.isalnum() or c in "-_" else "_" for c in user_id)
return self.data_dir / f"{safe_id}.json"
async def get_session(self, user_id: str, stream_id: str) -> KokoroSession:
"""获取或创建会话"""
async with self._get_lock(user_id):
# 检查内存缓存
if user_id in self._sessions:
session = self._sessions[user_id]
session.stream_id = stream_id # 更新 stream_id
return session
# 尝试从文件加载
session = await self._load_from_file(user_id)
if session:
session.stream_id = stream_id
self._sessions[user_id] = session
return session
# 创建新会话
session = KokoroSession(user_id=user_id, stream_id=stream_id)
self._sessions[user_id] = session
logger.info(f"创建新会话: {user_id}")
return session
async def _load_from_file(self, user_id: str) -> Optional[KokoroSession]:
"""从文件加载会话"""
file_path = self._get_file_path(user_id)
if not file_path.exists():
return None
try:
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
session = KokoroSession.from_dict(data)
logger.debug(f"从文件加载会话: {user_id}")
return session
except Exception as e:
logger.error(f"加载会话失败 {user_id}: {e}")
return None
async def save_session(self, user_id: str) -> bool:
"""保存会话到文件"""
async with self._get_lock(user_id):
if user_id not in self._sessions:
return False
session = self._sessions[user_id]
file_path = self._get_file_path(user_id)
try:
data = session.to_dict()
temp_path = file_path.with_suffix(".json.tmp")
with open(temp_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=2)
os.replace(temp_path, file_path)
return True
except Exception as e:
logger.error(f"保存会话失败 {user_id}: {e}")
return False
async def save_all(self) -> int:
"""保存所有会话"""
count = 0
for user_id in list(self._sessions.keys()):
if await self.save_session(user_id):
count += 1
return count
async def get_waiting_sessions(self) -> list[KokoroSession]:
"""获取所有处于等待状态的会话"""
return [s for s in self._sessions.values() if s.status == SessionStatus.WAITING]
async def get_all_sessions(self) -> list[KokoroSession]:
"""获取所有会话"""
return list(self._sessions.values())
def get_session_sync(self, user_id: str) -> Optional[KokoroSession]:
"""同步获取会话(仅从内存)"""
return self._sessions.get(user_id)
# 全局单例
_session_manager: Optional[SessionManager] = None
def get_session_manager() -> SessionManager:
"""获取全局会话管理器"""
global _session_manager
if _session_manager is None:
_session_manager = SessionManager()
return _session_manager
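
下面是一个假设性的使用示意(非本 PR 代码),串起"取会话、记录用户消息、记录规划、进入等待、持久化"这条链路:

```python
# 假设性示例:SessionManager 与 KokoroSession 的基本用法
import asyncio

async def _demo():
    manager = get_session_manager()
    session = await manager.get_session(user_id="10001", stream_id="qq:10001:private")

    session.add_user_message(content="在吗?", user_name="小明", user_id="10001")
    session.add_bot_planning(
        thought="先回一句,看看他想说什么",
        actions=[{"type": "kfc_reply", "content": "在的,怎么啦?"}],
        expected_reaction="他会说明来意",
        max_wait_seconds=300,
    )
    session.start_waiting(expected_reaction="他会说明来意", max_wait_seconds=300)

    await manager.save_session("10001")

# asyncio.run(_demo())  # 仅示意
```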

View File

@@ -0,0 +1,575 @@
"""
Kokoro Flow Chatter - 统一模式
统一模式(Unified Mode):
- 使用模块化的提示词组件构建提示词
- System Prompt + User Prompt 的标准结构
- 一次 LLM 调用完成思考 + 回复生成
- 输出 JSON 格式(thought + actions + max_wait_seconds)
与分离模式(Split Mode)的区别:
- 统一模式:一次调用完成所有工作,actions 中直接包含回复内容
- 分离模式:Planner + Replyer 两次调用,先规划再生成回复
"""
import time
from datetime import datetime
from typing import TYPE_CHECKING, Optional
from src.common.logger import get_logger
from src.config.config import global_config
from src.plugin_system.apis import llm_api
from src.utils.json_parser import extract_and_parse_json
from .models import LLMResponse, EventType
from .session import KokoroSession
# 统一模式专用的提示词模块
from . import prompt_modules_unified as prompt_modules
if TYPE_CHECKING:
from src.chat.message_receive.chat_stream import ChatStream
from src.common.data_models.message_manager_data_model import StreamContext
from src.plugin_system.base.component_types import ActionInfo
logger = get_logger("kfc_unified")
class UnifiedPromptGenerator:
"""
统一模式提示词生成器
为统一模式构建提示词:
- generate_system_prompt: 构建系统提示词
- generate_responding_prompt: 回应消息场景
- generate_timeout_prompt: 超时决策场景
- generate_proactive_prompt: 主动思考场景
"""
def __init__(self):
pass
async def generate_system_prompt(
self,
session: KokoroSession,
available_actions: Optional[dict] = None,
context_data: Optional[dict[str, str]] = None,
chat_stream: Optional["ChatStream"] = None,
) -> str:
"""
生成系统提示词
使用 prompt_modules.build_system_prompt() 构建模块化的提示词
"""
return prompt_modules.build_system_prompt(
session=session,
available_actions=available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
async def generate_responding_prompt(
self,
session: KokoroSession,
message_content: str,
sender_name: str,
sender_id: str,
message_time: Optional[float] = None,
available_actions: Optional[dict] = None,
context: Optional["StreamContext"] = None,
context_data: Optional[dict[str, str]] = None,
chat_stream: Optional["ChatStream"] = None,
all_unread_messages: Optional[list] = None,
) -> tuple[str, str]:
"""
生成回应消息场景的提示词
Returns:
tuple[str, str]: (系统提示词, 用户提示词)
"""
# 生成系统提示词
system_prompt = await self.generate_system_prompt(
session,
available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
# 构建叙事历史
if context:
narrative_history = prompt_modules.format_history_from_context(
context, session.mental_log
)
else:
narrative_history = prompt_modules.format_narrative_history(session.mental_log)
# 格式化收到的消息
incoming_messages = prompt_modules.format_incoming_messages(
message_content=message_content,
sender_name=sender_name,
sender_id=sender_id,
message_time=message_time,
all_unread_messages=all_unread_messages,
)
# 使用用户提示词模板
user_prompt = prompt_modules.RESPONDING_USER_PROMPT_TEMPLATE.format(
narrative_history=narrative_history,
incoming_messages=incoming_messages,
)
return system_prompt, user_prompt
async def generate_timeout_prompt(
self,
session: KokoroSession,
available_actions: Optional[dict] = None,
context_data: Optional[dict[str, str]] = None,
chat_stream: Optional["ChatStream"] = None,
) -> tuple[str, str]:
"""
生成超时决策场景的提示词
Returns:
tuple[str, str]: (系统提示词, 用户提示词)
"""
# 生成系统提示词
system_prompt = await self.generate_system_prompt(
session,
available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
# 构建叙事历史
narrative_history = prompt_modules.format_narrative_history(session.mental_log)
# 计算等待时间
wait_duration = session.waiting_config.get_elapsed_seconds()
# 生成连续追问警告(使用 waiting_config.thinking_count 作为追问计数)
followup_count = session.waiting_config.thinking_count
max_followups = 3 # 最多追问3次
if followup_count >= max_followups:
followup_warning = f"""⚠️ **重要提醒**
你已经连续追问了 {followup_count} 次,对方都没有回复。
**强烈建议不要再发消息了**——继续追问会显得很缠人、很不尊重对方的空间。
对方可能真的在忙,或者暂时不想回复,这都是正常的。
请选择 `do_nothing` 继续等待,或者直接结束对话(设置 `max_wait_seconds: 0`)。"""
elif followup_count > 0:
followup_warning = f"""📝 提示:这已经是你第 {followup_count + 1} 次等待对方回复了。
如果对方持续没有回应,可能真的在忙或不方便,不需要急着追问。"""
else:
followup_warning = ""
# 获取最后一条 Bot 消息
last_bot_message = "(没有记录)"
for entry in reversed(session.mental_log):
if entry.event_type == EventType.BOT_PLANNING:
for action in entry.actions:
if action.get("type") in ("reply", "kfc_reply"):
content = action.get("content", "")
if content:
last_bot_message = content
break
if last_bot_message != "(没有记录)":
break
# 使用用户提示词模板
user_prompt = prompt_modules.TIMEOUT_DECISION_USER_PROMPT_TEMPLATE.format(
narrative_history=narrative_history,
wait_duration_seconds=wait_duration,
wait_duration_minutes=wait_duration / 60,
expected_user_reaction=session.waiting_config.expected_reaction or "不确定",
followup_warning=followup_warning,
last_bot_message=last_bot_message,
)
return system_prompt, user_prompt
async def generate_proactive_prompt(
self,
session: KokoroSession,
trigger_context: str,
available_actions: Optional[dict] = None,
context_data: Optional[dict[str, str]] = None,
chat_stream: Optional["ChatStream"] = None,
) -> tuple[str, str]:
"""
生成主动思考场景的提示词
Returns:
tuple[str, str]: (系统提示词, 用户提示词)
"""
# 生成系统提示词
system_prompt = await self.generate_system_prompt(
session,
available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
# 构建叙事历史
narrative_history = prompt_modules.format_narrative_history(
session.mental_log, max_entries=10
)
# 计算沉默时长
silence_seconds = time.time() - session.last_activity_at
if silence_seconds < 3600:
silence_duration = f"{silence_seconds / 60:.0f}分钟"
else:
silence_duration = f"{silence_seconds / 3600:.1f}小时"
# 当前时间
current_time = datetime.now().strftime("%Y年%m月%d日 %H:%M")
# 从 context_data 获取关系信息
relation_block = ""
if context_data:
relation_info = context_data.get("relation_info", "")
if relation_info:
relation_block = f"### 你与对方的关系\n{relation_info}"
if not relation_block:
# 回退:使用默认关系描述
relation_block = """### 你与对方的关系
- 你们还不太熟悉
- 正在慢慢了解中"""
# 使用用户提示词模板
user_prompt = prompt_modules.PROACTIVE_THINKING_USER_PROMPT_TEMPLATE.format(
narrative_history=narrative_history,
current_time=current_time,
silence_duration=silence_duration,
relation_block=relation_block,
trigger_context=trigger_context,
)
return system_prompt, user_prompt
def build_messages_for_llm(
self,
system_prompt: str,
user_prompt: str,
stream_id: str = "",
) -> str:
"""
构建 LLM 请求的完整提示词
将 system + user 合并为单个提示词字符串
"""
# 合并提示词
full_prompt = f"{system_prompt}\n\n---\n\n{user_prompt}"
# DEBUG日志:打印完整的KFC提示词(只在 DEBUG 级别输出)
logger.debug(
f"Final KFC prompt constructed for stream {stream_id}:\n"
f"--- PROMPT START ---\n"
f"{full_prompt}\n"
f"--- PROMPT END ---"
)
return full_prompt
# 全局提示词生成器实例
_prompt_generator: Optional[UnifiedPromptGenerator] = None
def get_unified_prompt_generator() -> UnifiedPromptGenerator:
"""获取全局提示词生成器实例"""
global _prompt_generator
if _prompt_generator is None:
_prompt_generator = UnifiedPromptGenerator()
return _prompt_generator
async def generate_unified_response(
session: KokoroSession,
user_name: str,
situation_type: str = "new_message",
chat_stream: Optional["ChatStream"] = None,
available_actions: Optional[dict] = None,
extra_context: Optional[dict] = None,
) -> LLMResponse:
"""
统一模式:单次 LLM 调用生成完整响应
调用方式:
- 使用 UnifiedPromptGenerator 生成 System + User 提示词
- 使用 replyer 模型调用 LLM
- 解析 JSON 响应(thought + actions + max_wait_seconds)
Args:
session: 会话对象
user_name: 用户名称
situation_type: 情况类型 (new_message/timeout/proactive)
chat_stream: 聊天流对象
available_actions: 可用动作字典
extra_context: 额外上下文
Returns:
LLMResponse 对象,包含完整的思考和动作
"""
try:
prompt_generator = get_unified_prompt_generator()
extra_context = extra_context or {}
# 获取上下文数据(关系、记忆等)
context_data = await _build_context_data(user_name, chat_stream, session.user_id)
# 根据情况类型选择提示词生成方法
if situation_type == "timeout":
system_prompt, user_prompt = await prompt_generator.generate_timeout_prompt(
session=session,
available_actions=available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
elif situation_type == "proactive":
trigger_context = extra_context.get("trigger_reason", "")
system_prompt, user_prompt = await prompt_generator.generate_proactive_prompt(
session=session,
trigger_context=trigger_context,
available_actions=available_actions,
context_data=context_data,
chat_stream=chat_stream,
)
else:
# 默认为回应消息场景 (new_message, reply_in_time, reply_late)
# 获取最后一条用户消息
message_content, sender_name, sender_id, message_time, all_unread = _get_last_user_message(
session, user_name, chat_stream
)
system_prompt, user_prompt = await prompt_generator.generate_responding_prompt(
session=session,
message_content=message_content,
sender_name=sender_name,
sender_id=sender_id,
message_time=message_time,
available_actions=available_actions,
context=chat_stream.context if chat_stream else None,
context_data=context_data,
chat_stream=chat_stream,
all_unread_messages=all_unread,
)
# 构建完整提示词
prompt = prompt_generator.build_messages_for_llm(
system_prompt,
user_prompt,
stream_id=chat_stream.stream_id if chat_stream else "",
)
# 显示提示词(调试模式 - 只有在配置中开启时才输出)
if global_config and global_config.debug.show_prompt:
logger.info(
f"[KFC] 完整提示词 (stream={chat_stream.stream_id if chat_stream else 'unknown'}):\n"
f"--- PROMPT START ---\n"
f"{prompt}\n"
f"--- PROMPT END ---"
)
# 获取 replyer 模型配置并调用 LLM
models = llm_api.get_available_models()
replyer_config = models.get("replyer")
if not replyer_config:
logger.error("[KFC Unified] 未找到 replyer 模型配置")
return LLMResponse.create_error_response("未找到 replyer 模型配置")
# 调用 LLM(使用合并后的提示词)
success, raw_response, reasoning, model_name = await llm_api.generate_with_model(
prompt=prompt,
model_config=replyer_config,
request_type="kokoro_flow_chatter.unified",
)
if not success:
logger.error(f"[KFC Unified] LLM 调用失败: {raw_response}")
return LLMResponse.create_error_response(raw_response)
# 输出原始 JSON 响应(DEBUG 级别,用于调试)
logger.debug(
f"Raw JSON response from LLM for stream {chat_stream.stream_id if chat_stream else 'unknown'}:\n"
f"--- JSON START ---\n"
f"{raw_response}\n"
f"--- JSON END ---"
)
# 解析响应
return _parse_unified_response(raw_response, chat_stream.stream_id if chat_stream else None)
except Exception as e:
logger.error(f"[KFC Unified] 生成失败: {e}")
import traceback
traceback.print_exc()
return LLMResponse.create_error_response(str(e))
async def _build_context_data(
user_name: str,
chat_stream: Optional["ChatStream"],
user_id: Optional[str] = None,
) -> dict[str, str]:
"""
构建上下文数据(关系、记忆、表达习惯等)
"""
if not chat_stream:
return {
"relation_info": f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。",
"memory_block": "",
"expression_habits": "",
"schedule": "",
}
try:
from .context_builder import KFCContextBuilder
builder = KFCContextBuilder(chat_stream)
# 获取最近的消息作为 target_message(用于记忆检索)
target_message = ""
if chat_stream.context:
unread = chat_stream.context.get_unread_messages()
if unread:
target_message = unread[-1].processed_plain_text or unread[-1].display_message or ""
context_data = await builder.build_all_context(
sender_name=user_name,
target_message=target_message,
context=chat_stream.context,
user_id=user_id,
)
return context_data
except Exception as e:
logger.warning(f"构建上下文数据失败: {e}")
return {
"relation_info": f"你与 {user_name} 还不太熟悉,这是早期的交流阶段。",
"memory_block": "",
"expression_habits": "",
"schedule": "",
}
def _get_last_user_message(
session: KokoroSession,
user_name: str,
chat_stream: Optional["ChatStream"],
) -> tuple[str, str, str, float, Optional[list]]:
"""
获取最后一条用户消息
Returns:
tuple: (消息内容, 发送者名称, 发送者ID, 消息时间, 所有未读消息列表)
"""
message_content = ""
sender_name = user_name
sender_id = session.user_id or ""
message_time = time.time()
all_unread = None
# 从 chat_stream 获取未读消息
if chat_stream and chat_stream.context:
unread = chat_stream.context.get_unread_messages()
if unread:
all_unread = unread if len(unread) > 1 else None
last_msg = unread[-1]
message_content = last_msg.processed_plain_text or last_msg.display_message or ""
if last_msg.user_info:
sender_name = last_msg.user_info.user_nickname or user_name
sender_id = str(last_msg.user_info.user_id)
message_time = last_msg.time or time.time()
# 如果没有从 chat_stream 获取到,从 mental_log 获取
if not message_content:
for entry in reversed(session.mental_log):
if entry.event_type == EventType.USER_MESSAGE:
message_content = entry.content or ""
sender_name = entry.user_name or user_name
message_time = entry.timestamp
break
return message_content, sender_name, sender_id, message_time, all_unread
def _parse_unified_response(raw_response: str, stream_id: str | None = None) -> LLMResponse:
"""
解析统一模式的 LLM 响应
响应格式:
{
"thought": "...",
"expected_user_reaction": "...",
"max_wait_seconds": 300,
"actions": [{"type": "reply", "content": "..."}]
}
"""
data = extract_and_parse_json(raw_response, strict=False)
if not data or not isinstance(data, dict):
logger.warning(f"[KFC Unified] 无法解析 JSON: {raw_response[:200]}...")
return LLMResponse.create_error_response("无法解析响应格式")
# 兼容旧版的字段名
# expected_user_reaction -> expected_reaction
if "expected_user_reaction" in data and "expected_reaction" not in data:
data["expected_reaction"] = data["expected_user_reaction"]
# 兼容旧版的 reply -> kfc_reply
actions = data.get("actions", [])
for action in actions:
if isinstance(action, dict):
if action.get("type") == "reply":
action["type"] = "kfc_reply"
response = LLMResponse.from_dict(data)
# 美化日志输出:内心思考 + 回复内容
_log_pretty_response(response, stream_id)
return response
def _log_pretty_response(response: LLMResponse, stream_id: str | None = None) -> None:
"""简洁输出 LLM 响应日志"""
if not response.thought and not response.actions:
logger.warning("[KFC] 响应为空")
return
stream_tag = f"({stream_id[:8]}) " if stream_id else ""
# 收集回复内容和其他动作
replies = []
actions = []
for action in response.actions:
if action.type == "kfc_reply":
content = action.params.get("content", "")
if content:
replies.append(content)
elif action.type not in ("do_nothing", "no_action"):
actions.append(action.type)
# 逐行输出,简洁明了
if response.thought:
logger.info(f"[KFC] {stream_tag}💭 {response.thought}")
for i, reply in enumerate(replies):
if len(replies) > 1:
logger.info(f"[KFC] 💬[{i+1}] {reply}")
else:
logger.info(f"[KFC] 💬 {reply}")
if actions:
logger.info(f"[KFC] 🎯 {', '.join(actions)}")
if response.max_wait_seconds > 0 or response.expected_reaction:
meta = f"{response.max_wait_seconds}s" if response.max_wait_seconds > 0 else ""
if response.expected_reaction:
meta += f" 预期: {response.expected_reaction}"
logger.info(f"[KFC] {meta.strip()}")

View File

@@ -41,7 +41,7 @@ class NapcatAdapter(BaseAdapter):
adapter_description = "基于 MoFox-Bus 的 Napcat/OneBot 11 适配器" adapter_description = "基于 MoFox-Bus 的 Napcat/OneBot 11 适配器"
platform = "qq" platform = "qq"
run_in_subprocess = True run_in_subprocess = False
def __init__(self, core_sink: CoreSink, plugin: Optional[BasePlugin] = None, **kwargs): def __init__(self, core_sink: CoreSink, plugin: Optional[BasePlugin] = None, **kwargs):
"""初始化 Napcat 适配器""" """初始化 Napcat 适配器"""

View File

@@ -140,14 +140,7 @@ class PokeAction(BaseAction):
# === 基本信息(必须填写)=== # === 基本信息(必须填写)===
action_name = "poke_user" action_name = "poke_user"
action_description = """可以让你戳其他用户,为互动增添一份小小的乐趣。 action_description = "可以让你戳其他用户,为互动增添一份小小的乐趣。"
判定条件:
1. **互动时机**: 这是一个有趣的互动方式,可以在想提醒某人,或者单纯想开个玩笑时使用。
2. **用户请求**: 当用户明确要求使用戳一戳时。
3. **上下文需求**: 当上下文明确需要你戳一个或多个人时。
4. **频率与情绪**: 如果最近已经戳过,或者感觉对方情绪不高,请避免使用,不要打扰到别人哦。
请根据上述规则,回答“是”或“否”。"""
activation_type = ActionActivationType.ALWAYS activation_type = ActionActivationType.ALWAYS
parallel_action = True parallel_action = True
@@ -202,7 +195,7 @@ class PokeAction(BaseAction):
for i in range(times): for i in range(times):
logger.info(f"正在向 {display_name} ({user_id}) 发送第 {i + 1}/{times} 次戳一戳...") logger.info(f"正在向 {display_name} ({user_id}) 发送第 {i + 1}/{times} 次戳一戳...")
await self.send_command( await self.send_command(
"SEND_POKE", args=poke_args, display_message=f"戳了戳 {display_name} ({i + 1}/{times})" "SEND_POKE", args=poke_args
) )
# 添加一个延迟,避免因发送过快导致后续戳一戳失败 # 添加一个延迟,避免因发送过快导致后续戳一戳失败
await asyncio.sleep(1.5) await asyncio.sleep(1.5)

View File

@@ -62,7 +62,7 @@ class TTSVoiceAction(BaseAction):
""" """
action_name = "tts_voice_action" action_name = "tts_voice_action"
action_description = "将你生成好的文本转换为语音并发送。你必须提供要转换的文本。" action_description = "将你生成好的文本转换为语音并发送。注意:这是纯语音合成,只能说话,不能唱歌!"
mode_enable = ChatMode.ALL mode_enable = ChatMode.ALL
parallel_action = False parallel_action = False
@@ -70,7 +70,7 @@ class TTSVoiceAction(BaseAction):
action_parameters: ClassVar[dict] = { action_parameters: ClassVar[dict] = {
"tts_voice_text": { "tts_voice_text": {
"type": "string", "type": "string",
"description": "需要转换为语音并发送的完整、自然、适合口语的文本内容。", "description": "需要转换为语音并发送的完整、自然、适合口语的文本内容。注意:只能是说话内容,不能是歌词或唱歌!",
"required": True "required": True
}, },
"voice_style": { "voice_style": {
@@ -100,14 +100,15 @@ class TTSVoiceAction(BaseAction):
} }
action_require: ClassVar[list] = [ action_require: ClassVar[list] = [
"在调用此动作时,你必须在 'text' 参数中提供要合成语音的完整回复内容。这是强制性的。", "【核心限制】此动作只能用于说话绝对不能用于唱歌TTS无法发出有音调的歌声只会输出平淡的念白。如果用户要求唱歌不要使用此动作",
"当用户明确请求使用语音进行回复时,例如‘发个语音听听’、‘用语音说’等", "在调用此动作时,你必须在 'tts_voice_text' 参数中提供要合成语音的完整回复内容。这是强制性的",
"当用户明确请求使用语音进行回复时,例如'发个语音听听''用语音说'等。",
"当对话内容适合用语音表达,例如讲故事、念诗、撒嬌或进行角色扮演时。", "当对话内容适合用语音表达,例如讲故事、念诗、撒嬌或进行角色扮演时。",
"在表达特殊情感(如安慰、鼓励、庆祝)的场景下,可以主动使用语音来增强感染力。", "在表达特殊情感(如安慰、鼓励、庆祝)的场景下,可以主动使用语音来增强感染力。",
"不要在日常的、简短的问答或闲聊中频繁使用语音,避免打扰用户。", "不要在日常的、简短的问答或闲聊中频繁使用语音,避免打扰用户。",
"提供的 'text' 内容必须是纯粹的对话,不能包含任何括号或方括号括起来的动作、表情、或场景描述(例如,不要出现 '(笑)''[歪头]'", "提供的 'tts_voice_text' 内容必须是纯粹的对话,不能包含任何括号或方括号括起来的动作、表情、或场景描述(例如,不要出现 '(笑)''[歪头]'",
"**重要**:此动作专为语音合成设计,因此 'text' 参数的内容必须是纯净、标准的口语文本。请务必抑制你通常的、富有表现力的文本风格,不要使用任何辅助聊天或增强视觉效果的特殊符号(例如 '', '', '', '' 等),因为它们无法被正确合成为语音。", "**重要**:此动作专为语音合成设计,因此 'tts_voice_text' 参数的内容必须是纯净、标准的口语文本。请务必抑制你通常的、富有表现力的文本风格,不要使用任何辅助聊天或增强视觉效果的特殊符号(例如 '', '', '', '' 等),因为它们无法被正确合成为语音。",
"【**最终规则**】'text' 参数中,所有句子和停顿【必须】使用且只能使用以下四个标准标点符号:'' (逗号)、'' (句号)、'' (问号)、'' (叹号)。任何其他符号,特别是 '...''' 以及任何表情符号或装饰性符号,都【严禁】出现,否则将导致语音合成严重失败。" "【**最终规则**】'tts_voice_text' 参数中,所有句子和停顿【必须】使用且只能使用以下四个标准标点符号:'' (逗号)、'' (句号)、'' (问号)、'' (叹号)。任何其他符号,特别是 '...''' 以及任何表情符号或装饰性符号,都【严禁】出现,否则将导致语音合成严重失败。"
] ]
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):

View File

@@ -1,5 +1,5 @@
[inner] [inner]
version = "7.9.2" version = "7.9.5"
#----以下是给开发人员阅读的如果你只是部署了MoFox-Bot不需要阅读---- #----以下是给开发人员阅读的如果你只是部署了MoFox-Bot不需要阅读----
#如果你想要修改配置文件请递增version的值 #如果你想要修改配置文件请递增version的值
@@ -574,53 +574,44 @@ strong_mention_interest_score = 2.0 # 强提及的兴趣分(被@、被回复
weak_mention_interest_score = 0.8 # 弱提及的兴趣分文本匹配bot名字或别名 weak_mention_interest_score = 0.8 # 弱提及的兴趣分文本匹配bot名字或别名
base_relationship_score = 0.3 # 基础人物关系分 base_relationship_score = 0.3 # 基础人物关系分
[proactive_thinking] # 主动思考(主动发起对话)功能配置 [proactive_thinking] # 主动思考(主动发起对话)功能配置 - 用于群聊和私聊当KFC关闭时
# 详细配置说明请参考docs/proactive_thinking_config_guide.md # 详细配置说明请参考docs/proactive_thinking_config_guide.md
# --- 总开关 --- # --- 总开关 ---
enable = true # 是否启用主动发起对话功能 enable = true # 是否启用主动发起对话功能
# --- 间隔配置 --- # --- 间隔配置 ---
base_interval = 720 # 基础触发间隔(秒),默认12分钟 base_interval = 1800 # 基础触发间隔(秒),默认30分钟
min_interval = 360 # 最小触发间隔(秒),默认6分钟 min_interval = 600 # 最小触发间隔(秒),默认10分钟。兴趣分数高时会接近此值
max_interval = 2880 # 最大触发间隔(秒),默认48分钟 max_interval = 7200 # 最大触发间隔(秒),默认2小时。兴趣分数低时会接近此值
# 动态调整配置 # --- 动态调整配置 ---
use_interest_score = true # 是否根据兴趣分数动态调整间隔 use_interest_score = true # 是否根据兴趣分数动态调整间隔。关闭则使用固定base_interval
interest_score_factor = 2.0 # 兴趣分数影响因子1.0-3.0 interest_score_factor = 2.0 # 兴趣分数影响因子。公式: interval = base * (factor - score)
# 公式: interval = base_interval * (interest_score_factor - interest_score)
# 例如: interest_score=0.8, factor=2.0 -> interval = 1800 * 1.2 = 2160秒(36分钟)
# --- 黑白名单配置 --- # --- 黑白名单配置 ---
whitelist_mode = false # 是否启用白名单模式启用后只对白名单中的聊天流生效 whitelist_mode = false # 是否启用白名单模式启用后只对白名单中的聊天流生效
blacklist_mode = false # 是否启用黑名单模式启用后排除黑名单中的聊天流 blacklist_mode = false # 是否启用黑名单模式启用后排除黑名单中的聊天流
# 白名单配置(示例格式)
whitelist_private = [] # 私聊白名单,格式: ["qq:12345:private"] whitelist_private = [] # 私聊白名单,格式: ["qq:12345:private"]
whitelist_group = [] # 群聊白名单,格式: ["qq:123456:group"] whitelist_group = [] # 群聊白名单,格式: ["qq:123456:group"]
# 黑名单配置(示例格式)
blacklist_private = [] # 私聊黑名单,格式: ["qq:12345:private"] blacklist_private = [] # 私聊黑名单,格式: ["qq:12345:private"]
blacklist_group = [] # 群聊黑名单,格式: ["qq:999999:group"] blacklist_group = [] # 群聊黑名单,格式: ["qq:999999:group"]
# --- 作用范围 ---
enable_in_private = true # 是否允许在私聊中主动发起对话
enable_in_group = true # 是否允许在群聊中主动发起对话
# --- 兴趣分数阈值 --- # --- 兴趣分数阈值 ---
min_interest_score = 0.0 # 最低兴趣分数阈值,低于此值不会主动思考 min_interest_score = 0.0 # 最低兴趣分数阈值,低于此值不会主动思考
max_interest_score = 1.0 # 最高兴趣分数阈值,高于此值不会主动思考 max_interest_score = 1.0 # 最高兴趣分数阈值,高于此值不会主动思考
# --- 时间策略配置 --- # --- 时间策略配置 ---
enable_time_strategy = true # 是否启用时间策略(根据时段调整频率) enable_time_strategy = false # 是否启用时间策略(根据时段调整频率)
quiet_hours_start = "00:00" # 安静时段开始时间,格式: "HH:MM" quiet_hours_start = "00:00" # 安静时段开始时间,格式: "HH:MM"
quiet_hours_end = "07:00" # 安静时段结束时间,格式: "HH:MM" quiet_hours_end = "07:00" # 安静时段结束时间,格式: "HH:MM"
active_hours_multiplier = 0.7 # 活跃时段间隔倍数,<1表示更频繁>1表示更稀疏 active_hours_multiplier = 0.7 # 活跃时段间隔倍数,<1表示更频繁>1表示更稀疏
# --- 冷却与限制 --- # --- 冷却与限制 ---
reply_reset_enabled = true # bot回复后是否重置定时器(避免回复后立即又主动发言) reply_reset_enabled = true # bot回复后是否重置定时器
topic_throw_cooldown = 3600 # 主动发言后的冷却时间(秒)期间暂停主动思考等待用户回复。0表示不暂停继续主动思考 topic_throw_cooldown = 3600 # 抛出话题后的冷却时间(秒)
max_daily_proactive = 3 # 每个聊天流每天最多主动发言次数0表示不限制 max_daily_proactive = 0 # 每个聊天流每天最多主动发言次数0表示不限制
# --- 决策权重配置 --- # --- 决策权重配置 ---
do_nothing_weight = 0.4 # do_nothing动作的基础权重 do_nothing_weight = 0.4 # do_nothing动作的基础权重
@@ -628,5 +619,50 @@ simple_bubble_weight = 0.3 # simple_bubble动作的基础权重
throw_topic_weight = 0.3 # throw_topic动作的基础权重 throw_topic_weight = 0.3 # throw_topic动作的基础权重
# --- 调试与监控 --- # --- 调试与监控 ---
enable_statistics = false # 是否启用统计功能(记录触发次数、决策分布等) enable_statistics = true # 是否启用统计功能
log_decisions = false # 是否记录每次决策的详细日志(用于调试) log_decisions = false # 是否记录每次决策的详细日志
# ==================== Kokoro Flow Chatter (心流聊天器) 配置 ====================
# KFC是专为私聊设计的深度情感交互处理器。
# 注意:这是一个可选的聊天模式,关闭后私聊将由默认的AFC处理(使用上面的proactive_thinking配置)
# 核心理念:KFC不是独立人格,它复用全局的人设、情感框架和回复模型。
[kokoro_flow_chatter]
# --- 总开关 ---
# 开启后,KFC将接管所有私聊消息;关闭后,私聊消息将由AFC处理。
enable = true
# --- 工作模式 ---
# 可选值: "unified"(统一模式)或 "split"(分离模式)
# unified: 单次LLM调用完成思考和回复生成,类似传统聊天方式,响应更快
# split: Planner + Replyer 两次LLM调用,先规划再生成回复,控制更精细
mode = "split"
# --- 核心行为配置 ---
max_wait_seconds_default = 300 # 默认的最大等待秒数(AI发送消息后愿意等待用户回复的时间)
enable_continuous_thinking = true # 是否在等待期间启用心理活动更新
# --- 私聊专属主动思考配置 ---
# 注意:这是KFC专属的主动思考配置,只有当KFC启用时才生效。
# 它旨在模拟更真实、情感驱动的互动,而非简单的定时任务。
# 「主动思考」是「想一想要不要联系对方」,不是「到时间就发消息」。
# 她可能决定说些什么,也可能决定「算了,不打扰了」。
[kokoro_flow_chatter.proactive_thinking]
enabled = true # 是否启用KFC的私聊主动思考。
# 1. 沉默触发器:当感到长久的沉默时,她可能会想说些什么。
silence_threshold_seconds = 7200 # 用户沉默超过此时长可能触发主动思考(默认2小时)
# 2. 关系门槛:她不会对不熟悉的人过于主动。
min_affinity_for_proactive = 0.3 # 需要达到最低好感度,她才会开始主动关心。
# 3. 频率呼吸:为了避免打扰,她的关心总是有间隔的。
min_interval_between_proactive = 1800 # 两次主动思考之间的最小间隔(默认30分钟)
# 4. 勿扰时段:在这个时间范围内,不会触发主动思考(避免深夜打扰用户)。
# 格式为 "HH:MM",使用24小时制。如果 start > end,表示跨越午夜(如 23:00 到 07:00)
quiet_hours_start = "23:00" # 勿扰开始时间
quiet_hours_end = "07:00" # 勿扰结束时间
# 5. 触发概率:每次检查时主动发起的概率,用于避免过于频繁打扰。
trigger_probability = 0.3 # 0.0~1.0,默认30%概率
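
配置中"如果 start > end 表示跨越午夜"这条语义,大致等价于下面这个判断(示意性 Python 片段,并非项目内的实际实现):

```python
# 示意:跨午夜勿扰时段的一种判断方式
from datetime import time as dtime

def in_quiet_hours(now: dtime, start: dtime, end: dtime) -> bool:
    if start <= end:                       # 普通区间,例如 00:00-07:00
        return start <= now < end
    return now >= start or now < end       # 跨午夜区间,例如 23:00-07:00

print(in_quiet_hours(dtime(23, 30), dtime(23, 0), dtime(7, 0)))  # True:深夜处于勿扰时段
print(in_quiet_hours(dtime(12, 0),  dtime(23, 0), dtime(7, 0)))  # False:中午不在勿扰时段
```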