Merge pull request #6 from MoFox-Studio/dev

Dev
This commit is contained in:
yishan
2025-09-23 14:37:10 +08:00
committed by GitHub
10 changed files with 81 additions and 108 deletions

View File

@@ -10,6 +10,8 @@ on:
jobs:
create-prerelease:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
@@ -19,7 +21,7 @@ jobs:
- name: Generate tag name
id: generate_tag
run: echo "TAG_NAME=prerelease-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
run: echo "TAG_NAME=MoFox-prerelease-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
- name: Create Pre-release
env:

View File

@@ -559,64 +559,16 @@ class HeartFChatting:
if not new_message:
return False, 0.0
new_message_count = len(new_message)
talk_frequency = global_config.chat.get_current_talk_frequency(self.context.stream_id)
modified_exit_count_threshold = self.context.focus_energy * 0.5 / talk_frequency
modified_exit_interest_threshold = 1.5 / talk_frequency
# Interest value of the current message batch
batch_interest = 0.0
# Compute the average interest value
total_interest = 0.0
message_count = 0
for msg_dict in new_message:
interest_value = msg_dict.get("interest_value", 0.0)
if msg_dict.get("processed_plain_text", ""):
batch_interest += interest_value
total_interest += interest_value
message_count += 1
avg_interest = total_interest / message_count if message_count > 0 else 0.0
# In breaking mode, accumulate the interest of all messages
if new_message_count > 0:
self.context.breaking_accumulated_interest += batch_interest
total_interest = self.context.breaking_accumulated_interest
else:
total_interest = self.context.breaking_accumulated_interest
if new_message_count >= modified_exit_count_threshold:
# Record the interest value in the list
self.recent_interest_records.append(total_interest)
# Reset the accumulated interest, since the message-count threshold has been reached
self.context.breaking_accumulated_interest = 0.0
logger.info(
f"{self.context.log_prefix} Accumulated message count reached {new_message_count} (>{modified_exit_count_threshold:.1f}), ending wait; accumulated interest: {total_interest:.2f}"
)
return True, total_interest / new_message_count
# Check the accumulated interest value
if new_message_count > 0:
# Only log when the interest value changes
if not hasattr(self, "_last_accumulated_interest") or total_interest != self._last_accumulated_interest:
logger.info(
f"{self.context.log_prefix} Current accumulated interest in breaking mode: {total_interest:.2f}, focus: {global_config.chat.focus_value:.1f}"
)
self._last_accumulated_interest = total_interest
if total_interest >= modified_exit_interest_threshold:
# Record the interest value in the list
self.recent_interest_records.append(total_interest)
# Reset the accumulated interest, since the interest threshold has been reached
self.context.breaking_accumulated_interest = 0.0
logger.info(
f"{self.context.log_prefix} Accumulated interest reached {total_interest:.2f} (>{modified_exit_interest_threshold:.1f}), ending wait"
)
return True, total_interest / new_message_count
# Log the waiting status every 10 seconds
if (
int(time.time() - self.context.last_read_time) > 0
and int(time.time() - self.context.last_read_time) % 10 == 0
):
logger.info(
f"{self.context.log_prefix} Waited {time.time() - self.context.last_read_time:.0f}s, {new_message_count} messages accumulated, accumulated interest {total_interest:.1f}, still waiting..."
)
await asyncio.sleep(0.5)
return False, 0.0
logger.info(f"{self.context.log_prefix} Received {len(new_message)} new messages, processing immediately! Average interest: {avg_interest:.2f}")
return True, avg_interest
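The new body drops the breaking-mode bookkeeping above and simply processes whatever arrived, scored by its average interest. A minimal standalone sketch of that reduced logic, assuming the same msg_dict keys used in this hunk (processed_plain_text, interest_value):

from typing import Any

def average_interest(new_message: list[dict[str, Any]]) -> tuple[bool, float]:
    """Return (should_process, avg_interest) for a batch of incoming messages."""
    if not new_message:
        return False, 0.0
    # Only messages that carry text contribute to the average.
    interests = [
        msg.get("interest_value", 0.0)
        for msg in new_message
        if msg.get("processed_plain_text", "")
    ]
    avg = sum(interests) / len(interests) if interests else 0.0
    return True, avg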

View File

@@ -249,7 +249,7 @@ class ChatManager:
# Check whether the stream exists in the database
async def _db_find_stream_async(s_id: str):
async with get_db_session() as session:
return (await session.execute(select(ChatStreams).where(ChatStreams.stream_id == s_id))).scalar()
return (await session.execute(select(ChatStreams).where(ChatStreams.stream_id == s_id))).scalars().first()
model_instance = await _db_find_stream_async(stream_id)
@@ -396,7 +396,7 @@ class ChatManager:
async def _db_load_all_streams_async():
loaded_streams_data = []
async with get_db_session() as session:
for model_instance in (await session.execute(select(ChatStreams))).scalars():
for model_instance in (await session.execute(select(ChatStreams))).scalars().all():
user_info_data = {
"platform": model_instance.user_platform,
"user_id": model_instance.user_id,

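Both hunks move from the bare Result.scalar()/scalars() calls to the explicit .scalars().first() and .scalars().all() forms of the SQLAlchemy 2.0 async result API. A self-contained sketch of the same pattern; the ChatStreams stand-in below is simplified for illustration:

from typing import Optional
from sqlalchemy import String, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column

class Base(DeclarativeBase):
    pass

class ChatStreams(Base):
    # Reduced stand-in for the project's mapped model.
    __tablename__ = "chat_streams"
    stream_id: Mapped[str] = mapped_column(String, primary_key=True)

async def find_stream(session: AsyncSession, stream_id: str) -> Optional[ChatStreams]:
    result = await session.execute(
        select(ChatStreams).where(ChatStreams.stream_id == stream_id)
    )
    return result.scalars().first()  # one ORM instance or None

async def load_all_streams(session: AsyncSession) -> list[ChatStreams]:
    result = await session.execute(select(ChatStreams))
    return list(result.scalars().all())  # every ORM instance as a list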
View File

@@ -1,7 +1,8 @@
# mmc/src/common/database/db_migration.py
from sqlalchemy import inspect
from sqlalchemy.schema import AddColumn, CreateIndex
from sqlalchemy.schema import CreateIndex
from sqlalchemy.sql import text
from src.common.database.sqlalchemy_models import Base, get_engine
from src.common.logger import get_logger
@@ -27,7 +28,7 @@ async def check_and_migrate_database():
inspector = await connection.run_sync(get_inspector)
# Pass the inspector inside the synchronous lambda
db_table_names = await connection.run_sync(lambda conn: set(inspector.get_table_names(conn)))
db_table_names = await connection.run_sync(lambda conn: set(inspector.get_table_names()))
# 1. Handle table creation first
tables_to_create = []
@@ -59,53 +60,63 @@ async def check_and_migrate_database():
try:
# Check for and add missing columns
db_columns = await connection.run_sync(
lambda conn: {col["name"] for col in inspector.get_columns(table_name, conn)}
lambda conn: {col["name"] for col in inspector.get_columns(table_name)}
)
model_columns = {col.name for col in table.c}
missing_columns = model_columns - db_columns
if missing_columns:
logger.info(f"Missing columns found in table '{table_name}': {', '.join(missing_columns)}")
async with connection.begin() as trans:
def add_columns_sync(conn):
dialect = conn.dialect
for column_name in missing_columns:
try:
column = table.c[column_name]
add_column_ddl = AddColumn(table_name, column)
await connection.execute(add_column_ddl)
logger.info(f"Successfully added column '{column_name}' to table '{table_name}'")
except Exception as e:
logger.error(
f"Failed to add column '{column_name}' to table '{table_name}': {e}",
exc_info=True,
)
await trans.rollback()
break # If one column fails, stop processing the remaining columns of this table
column = table.c[column_name]
# Use the DDLCompiler to compile the column for the specific dialect
compiler = dialect.ddl_compiler(dialect, None)
# Compile the column's data type
column_type = compiler.get_column_specification(column)
# Build the raw SQL
sql = f"ALTER TABLE {table.name} ADD COLUMN {column.name} {column_type}"
# Append the default value (if any)
if column.default:
default_value = compiler.render_literal_value(column.default.arg, column.type)
sql += f" DEFAULT {default_value}"
# Append the NOT NULL constraint (if applicable)
if not column.nullable:
sql += " NOT NULL"
conn.execute(text(sql))
logger.info(f"Successfully added column '{column_name}' to table '{table_name}'")
await connection.run_sync(add_columns_sync)
else:
logger.info(f"Column structure of '{table_name}' is consistent.")
# Check for and create missing indexes
db_indexes = await connection.run_sync(
lambda conn: {idx["name"] for idx in inspector.get_indexes(table_name, conn)}
lambda conn: {idx["name"] for idx in inspector.get_indexes(table_name)}
)
model_indexes = {idx.name for idx in table.indexes}
missing_indexes = model_indexes - db_indexes
if missing_indexes:
logger.info(f"Missing indexes found in table '{table_name}': {', '.join(missing_indexes)}")
async with connection.begin() as trans:
for index_name in missing_indexes:
try:
def add_indexes_sync(conn):
with conn.begin():
for index_name in missing_indexes:
index_obj = next((idx for idx in table.indexes if idx.name == index_name), None)
if index_obj is not None:
await connection.execute(CreateIndex(index_obj))
conn.execute(CreateIndex(index_obj))
logger.info(f"Successfully created index '{index_name}' on table '{table_name}'")
except Exception as e:
logger.error(
f"Failed to create index '{index_name}' on table '{table_name}': {e}",
exc_info=True,
)
await trans.rollback()
break # If one index fails, stop processing the remaining indexes of this table
await connection.run_sync(add_indexes_sync)
else:
logger.debug(f"Indexes of '{table_name}' are consistent.")
@@ -114,3 +125,4 @@ async def check_and_migrate_database():
continue
logger.info("Database structure check and automatic migration completed.")
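The rewritten migration keeps every Inspector call and DDL statement inside synchronous callables handed to AsyncConnection.run_sync(), since both APIs are synchronous. A reduced sketch of the same pattern against a throwaway SQLite database; the demo table and column names are placeholders, not the project's models:

import asyncio
from sqlalchemy import Column, Integer, MetaData, Table, inspect, text
from sqlalchemy.ext.asyncio import create_async_engine

metadata = MetaData()
# Placeholder model table: "extra" is the column we expect to be missing.
demo = Table("demo", metadata, Column("id", Integer, primary_key=True), Column("extra", Integer))

async def add_missing_columns() -> None:
    engine = create_async_engine("sqlite+aiosqlite:///:memory:")
    async with engine.begin() as connection:
        # Simulate an out-of-date schema that lacks the "extra" column.
        await connection.execute(text("CREATE TABLE demo (id INTEGER PRIMARY KEY)"))

        def migrate(conn):
            existing = {col["name"] for col in inspect(conn).get_columns("demo")}
            compiler = conn.dialect.ddl_compiler(conn.dialect, None)
            for column in demo.c:
                if column.name not in existing:
                    # get_column_specification() renders name, type, DEFAULT and NOT NULL together.
                    spec = compiler.get_column_specification(column)
                    conn.execute(text(f"ALTER TABLE demo ADD COLUMN {spec}"))

        # The Inspector and DDL compiler are sync-only, so they run via run_sync().
        await connection.run_sync(migrate)
    await engine.dispose()

asyncio.run(add_missing_columns())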

View File

@@ -639,14 +639,9 @@ async def initialize_database():
}
)
else:
# SQLite configuration - the async engine uses the default connection pool
# SQLite configuration - aiosqlite does not support connection pool parameters
engine_kwargs.update(
{
"pool_size": 20, # increase pool size
"max_overflow": 30, # increase overflow connections
"pool_timeout": 60, # increase timeout
"pool_recycle": 3600, # recycle connections every hour
"pool_pre_ping": True, # ping check before using a connection
"connect_args": {
"check_same_thread": False,
"timeout": 30,

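Per the updated comment, the commit stops passing QueuePool-style sizing keywords for the aiosqlite driver and keeps only the driver-level connect_args. A minimal sketch of building the async SQLite engine that way; the database path is a placeholder:

from sqlalchemy.ext.asyncio import create_async_engine

# Only connect_args are forwarded for sqlite+aiosqlite; pool sizing keywords are omitted.
engine = create_async_engine(
    "sqlite+aiosqlite:///data/bot.db",  # placeholder path
    connect_args={
        "check_same_thread": False,
        "timeout": 30,
    },
)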
View File

@@ -223,6 +223,7 @@ MoFox_Bot (third-party modified version)
from src.plugin_system.apis.permission_api import permission_api
permission_manager = PermissionManager()
await permission_manager.initialize()
permission_api.set_permission_manager(permission_manager)
logger.info("Permission manager initialized successfully")
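The added call hands the freshly initialized manager to the plugin-facing permission_api module. A hypothetical sketch of what such a holder module can look like; the getter name and error handling below are illustrative assumptions, not the project's actual API:

# Hypothetical sketch of a module-level holder like permission_api.
class PermissionAPI:
    def __init__(self) -> None:
        self._manager = None

    def set_permission_manager(self, manager) -> None:
        # Called once at startup, after the manager's async initialize() has finished.
        self._manager = manager

    def get_permission_manager(self):
        # Illustrative accessor; name and behavior are assumptions.
        if self._manager is None:
            raise RuntimeError("PermissionManager has not been initialized yet")
        return self._manager

permission_api = PermissionAPI()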

View File

@@ -215,7 +215,7 @@ class PluginBase(ABC):
def _generate_and_save_default_config(self, config_file_path: str):
"""Generate and save a default config file based on the plugin's schema"""
if not self.config_schema:
logger.debug(f"{self.log_prefix} Plugin does not define config_schema; no config file will be generated")
logger.info(f"{self.log_prefix} Plugin does not define config_schema; no config file will be generated")
return
toml_str = f"# {self.plugin_name} - auto-generated configuration file\n"
@@ -479,6 +479,12 @@ class PluginBase(ABC):
# Check whether the final user config file exists
if not os.path.exists(user_config_path):
# If the plugin does not define config_schema, not creating the file is the expected behavior
if not self.config_schema:
logger.debug(f"{self.log_prefix} Plugin does not define config_schema; using an empty config.")
self.config = {}
return
logger.warning(f"{self.log_prefix} User config file {user_config_path} does not exist and cannot be created.")
return

View File

@@ -22,10 +22,15 @@ class PermissionManager(IPermissionManager):
"""Permission manager implementation class"""
def __init__(self):
self.engine = get_engine()
self.SessionLocal = async_sessionmaker(bind=self.engine)
self.engine = None
self.SessionLocal = None
self._master_users: Set[Tuple[str, str]] = set()
self._load_master_users()
async def initialize(self):
"""Asynchronously initialize the database connection"""
self.engine = await get_engine()
self.SessionLocal = async_sessionmaker(bind=self.engine)
logger.info("Permission manager initialization complete")
def _load_master_users(self):

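Because get_engine() is now awaited, engine and session-factory creation move out of the synchronous constructor into an explicit async initialize() step. A minimal sketch of this two-phase pattern; get_engine() below stands in for the project's async engine provider:

from typing import Optional
from sqlalchemy.ext.asyncio import AsyncEngine, async_sessionmaker, create_async_engine

async def get_engine() -> AsyncEngine:
    # Stand-in for the project's async engine provider.
    return create_async_engine("sqlite+aiosqlite:///:memory:")

class LazyDbService:
    """Construct synchronously, connect asynchronously."""

    def __init__(self) -> None:
        self.engine: Optional[AsyncEngine] = None
        self.SessionLocal: Optional[async_sessionmaker] = None

    async def initialize(self) -> None:
        self.engine = await get_engine()
        self.SessionLocal = async_sessionmaker(bind=self.engine)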
View File

@@ -94,11 +94,10 @@ class ScheduleLLMGenerator:
Please act as me, in my identity and tone, and generate a complete 24-hour schedule for me.
"""
attempt = 0
while True:
attempt += 1
max_retries = 3
for attempt in range(1, max_retries + 1):
try:
logger.info(f"Generating schedule (attempt {attempt})")
logger.info(f"Generating schedule (attempt {attempt}/{max_retries})")
prompt = base_prompt
if attempt > 1:
failure_hint = f"""
@@ -118,12 +117,16 @@ class ScheduleLLMGenerator:
return schedule_data
else:
logger.warning(f"Schedule generated on attempt {attempt} failed validation, retrying...")
await asyncio.sleep(2)
except Exception as e:
logger.error(f"Attempt {attempt} to generate the schedule failed: {e}")
logger.info("Retrying...")
await asyncio.sleep(3)
if attempt < max_retries:
logger.info("Retrying in 2 seconds...")
await asyncio.sleep(2)
logger.error("All attempts failed; unable to generate a schedule. Will retry automatically on next startup")
return None
@staticmethod
def _validate_schedule_with_pydantic(schedule_data) -> bool:

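The unbounded while True loop becomes a for loop over max_retries that sleeps between attempts and gives up cleanly, letting the caller retry on the next startup. A generic sketch of that retry shape; the generate and validate callables below are placeholders, not the real LLM client:

import asyncio
import random

async def fake_llm_call() -> dict:
    # Placeholder for the actual schedule-generation request.
    return {"ok": random.random() > 0.5}

def validate(data: dict) -> bool:
    # Placeholder for the pydantic-based validation step.
    return data.get("ok", False)

async def generate_with_retries(max_retries: int = 3):
    for attempt in range(1, max_retries + 1):
        try:
            result = await fake_llm_call()
            if validate(result):
                return result
            print(f"attempt {attempt}/{max_retries} failed validation")
        except Exception as exc:
            print(f"attempt {attempt}/{max_retries} raised: {exc}")
        if attempt < max_retries:
            await asyncio.sleep(2)  # brief pause before the next attempt
    return None  # signal the caller to retry on next startup

print(asyncio.run(generate_with_retries()))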
View File

@@ -1,5 +1,5 @@
[inner]
version = "6.8.8"
version = "6.8.9"
#---- The following is for developers; if you only deployed MoFox-Bot you do not need to read it ----
# If you want to modify the config file, please increment the version value
@@ -153,9 +153,6 @@ focus_value = 1
# Example: ["qq:123456789", "qq:987654321"]
focus_mode_quiet_groups = []
# breaking mode configuration
enable_breaking_mode = true # whether to enter breaking mode automatically; when disabled, breaking mode will not be entered on its own
# Force replies in private chats
force_reply_private = false # whether to force replies in private chats; when enabled, private chats will always be replied to