Windpicker-owo
2025-10-02 17:16:02 +08:00
6 changed files with 408 additions and 68 deletions

View File

@@ -124,6 +124,10 @@ async def db_query(
raise ValueError("query_type must be 'get', 'create', 'update', 'delete' or 'count'")
async with get_db_session() as session:
if not session:
logger.error("[SQLAlchemy] 无法获取数据库会话")
return None if single_result else []
if query_type == "get":
query = select(model_class)
@@ -221,7 +225,7 @@ async def db_query(
# Delete the matched records
affected_rows = 0
for record in records_to_delete:
session.delete(record)
await session.delete(record)
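# Note: AsyncSession.delete is a coroutine in SQLAlchemy's asyncio extension
# (it may need to lazily load related objects), so it must be awaited.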
affected_rows += 1
return affected_rows
@@ -274,6 +278,9 @@ async def db_save(
"""
try:
async with get_db_session() as session:
if not session:
logger.error("[SQLAlchemy] 无法获取数据库会话")
return None
# If key_field and key_value are provided, try to update the existing record first
if key_field and key_value is not None:
if hasattr(model_class, key_field):

View File

@@ -382,21 +382,19 @@ class BaseAction(ABC):
# Build the command payload
command_data = {"name": command_name, "args": args or {}}
response = await send_api.adapter_command_to_stream(
action=command_name,
params=args or {},
success = await send_api.command_to_stream(
command=command_data,
stream_id=self.chat_id,
platform=self.platform
storage_message=storage_message,
display_message=display_message,
)
# Judge success based on the response
if response and response.get("status") == "ok":
logger.info(f"{self.log_prefix} Adapter command executed successfully: {command_name}, response: {response.get('data')}")
return True
if success:
logger.info(f"{self.log_prefix} 成功发送命令: {command_name}")
else:
error_message = response.get('message', 'unknown error')
logger.error(f"{self.log_prefix} Adapter command failed: {command_name}, error: {error_message}")
return False
logger.error(f"{self.log_prefix} 发送命令失败: {command_name}")
return success
except Exception as e:
logger.error(f"{self.log_prefix} 发送命令时出错: {e}")

View File

@@ -59,6 +59,8 @@ class QZoneService:
self.cookie_service = cookie_service
# If no reply_tracker instance is provided, create a new one
self.reply_tracker = reply_tracker if reply_tracker is not None else ReplyTrackerService()
# In-memory lock used to prevent concurrent replies/comments
self.processing_comments = set()
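# Note: this set is an in-process guard only; it is not persisted or shared across
# workers, so the persistent ReplyTrackerService remains the source of truth.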
# --- Public Methods (High-Level Business Logic) ---
@@ -159,7 +161,7 @@ class QZoneService:
if self.get_config("monitor.enable_auto_reply", False):
try:
# Pass the new parameter to indicate that we are checking our own feeds
own_feeds = await api_client["list_feeds"](qq_account, 5, is_monitoring_own_feeds=True)
own_feeds = await api_client["list_feeds"](qq_account, 5)
if own_feeds:
logger.info(f"获取到自己 {len(own_feeds)} 条说说,检查评论...")
for feed in own_feeds:
@@ -263,37 +265,37 @@ class QZoneService:
return
# Check directly whether each comment has been replied to, without validation cleanup
comments_to_reply = []
comments_to_process = []
for comment in user_comments:
comment_tid = comment.get("comment_tid")
if not comment_tid:
continue
# Check whether it is already marked as replied in the persistent records
if not self.reply_tracker.has_replied(fid, comment_tid):
# Log it for traceability
logger.debug(
f"Found a new comment to reply to - feed ID: {fid}, comment ID: {comment_tid}, "
f"commenter: {comment.get('nickname', '')}, content: {comment.get('content', '')}"
)
comments_to_reply.append(comment)
comment_key = f"{fid}_{comment_tid}"
# Check both the persistent records and the in-memory lock
if not self.reply_tracker.has_replied(fid, comment_tid) and comment_key not in self.processing_comments:
logger.debug(f"Locking comment pending reply: {comment_key}")
self.processing_comments.add(comment_key)
comments_to_process.append(comment)
if not comments_to_reply:
logger.debug(f"说说 {fid} 下的所有评论都已回复过或无需回复")
if not comments_to_process:
logger.debug(f"说说 {fid} 下的所有评论都已回复过或正在处理中")
return
logger.info(f"发现自己说说下的 {len(comments_to_reply)} 条新评论,准备回复...")
for comment in comments_to_reply:
logger.info(f"发现自己说说下的 {len(comments_to_process)} 条新评论,准备回复...")
for comment in comments_to_process:
comment_tid = comment.get("comment_tid")
comment_key = f"{fid}_{comment_tid}"
nickname = comment.get("nickname", "")
comment_content = comment.get("content", "")
try:
reply_content = await self.content_service.generate_comment_reply(content, comment_content, nickname)
reply_content = await self.content_service.generate_comment_reply(
content, comment_content, nickname
)
if reply_content:
success = await api_client["reply"](fid, qq_account, nickname, reply_content, comment_tid)
if success:
# Mark as replied
self.reply_tracker.mark_as_replied(fid, comment_tid)
logger.info(f"Successfully replied to the comment from '{nickname}': '{reply_content}'")
else:
@@ -303,6 +305,11 @@ class QZoneService:
logger.warning(f"生成回复内容失败,跳过回复'{nickname}'的评论")
except Exception as e:
logger.error(f"回复'{nickname}'的评论时发生异常: {e}", exc_info=True)
finally:
# Release the lock regardless of success or failure
logger.debug(f"Unlocking comment: {comment_key}")
if comment_key in self.processing_comments:
self.processing_comments.remove(comment_key)
async def _validate_and_cleanup_reply_records(self, fid: str, my_replies: List[Dict]):
"""验证并清理已删除的回复记录"""
@@ -335,11 +342,34 @@ class QZoneService:
rt_con = feed.get("rt_con", "")
images = feed.get("images", [])
if random.random() <= self.get_config("read.comment_possibility", 0.3):
comment_text = await self.content_service.generate_comment(content, target_name, rt_con, images)
if comment_text:
await api_client["comment"](target_qq, fid, comment_text)
# --- Handle commenting ---
comment_key = f"{fid}_main_comment"
should_comment = random.random() <= self.get_config("read.comment_possibility", 0.3)
if (
should_comment
and not self.reply_tracker.has_replied(fid, "main_comment")
and comment_key not in self.processing_comments
):
logger.debug(f"锁定待评论说说: {comment_key}")
self.processing_comments.add(comment_key)
try:
comment_text = await self.content_service.generate_comment(content, target_name, rt_con, images)
if comment_text:
success = await api_client["comment"](target_qq, fid, comment_text)
if success:
self.reply_tracker.mark_as_replied(fid, "main_comment")
logger.info(f"成功评论'{target_name}'的说说: '{comment_text}'")
else:
logger.error(f"评论'{target_name}'的说说失败")
except Exception as e:
logger.error(f"评论'{target_name}'的说说时发生异常: {e}", exc_info=True)
finally:
logger.debug(f"解锁说说: {comment_key}")
if comment_key in self.processing_comments:
self.processing_comments.remove(comment_key)
# --- Handle likes (logic unchanged) ---
if random.random() <= self.get_config("read.like_possibility", 1.0):
await api_client["like"](target_qq, fid)
@@ -714,9 +744,10 @@ class QZoneService:
logger.error(f"上传图片 {index + 1} 异常: {e}", exc_info=True)
return None
async def _list_feeds(t_qq: str, num: int, is_monitoring_own_feeds: bool = False) -> List[Dict]:
"""获取指定用户说说列表"""
async def _list_feeds(t_qq: str, num: int) -> List[Dict]:
"""获取指定用户说说列表 (统一接口)"""
try:
# Always use format=json so that complete comments are returned
params = {
"g_tk": gtk,
"uin": t_qq,
@@ -724,58 +755,73 @@ class QZoneService:
"sort": 0,
"pos": 0,
"num": num,
"replynum": 100,
"callback": "_preloadCallback",
"replynum": 999, # 尽量获取更多
"code_version": 1,
"format": "jsonp",
"format": "json", # 关键使用JSON格式
"need_comment": 1,
}
res_text = await _request("GET", self.LIST_URL, params=params)
json_str = res_text[len("_preloadCallback(") : -2]
json_data = orjson.loads(json_str)
json_data = orjson.loads(res_text)
if json_data.get("code") != 0:
logger.warning(f"获取说说列表API返回错误: code={json_data.get('code')}, message={json_data.get('message')}")
return []
feeds_list = []
my_name = json_data.get("logininfo", {}).get("name", "")
for msg in json_data.get("msglist", []):
# Only when handling a friend's feeds, check whether we have already commented and skip if so
commentlist = msg.get("commentlist")
# Only when handling a friend's feeds, check whether we have already commented and skip if so
if not is_monitoring_own_feeds:
for msg in json_data.get("msglist", []):
# When reading a friend's feed, check whether we have already commented; if so, skip it
is_friend_feed = str(t_qq) != str(uin)
if is_friend_feed:
commentlist_for_check = msg.get("commentlist")
is_commented = False
if isinstance(commentlist, list):
if isinstance(commentlist_for_check, list):
is_commented = any(
c.get("name") == my_name for c in commentlist if isinstance(c, dict)
c.get("name") == my_name for c in commentlist_for_check if isinstance(c, dict)
)
if is_commented:
continue
# --- Safely handle the image list ---
images = []
pictotal = msg.get("pictotal")
if isinstance(pictotal, list):
images = [
pic["url1"] for pic in pictotal if isinstance(pic, dict) and "url1" in pic
]
if "pic" in msg and isinstance(msg["pic"], list):
images = [pic.get("url1", "") for pic in msg["pic"] if pic.get("url1")]
elif "pictotal" in msg and isinstance(msg["pictotal"], list):
images = [pic.get("url1", "") for pic in msg["pictotal"] if pic.get("url1")]
# --- Safely handle the comment list ---
# --- Parse the full comment list (including second-level replies) ---
comments = []
commentlist = msg.get("commentlist")
if isinstance(commentlist, list):
for c in commentlist:
# Make sure the comment entry is also a dict
if isinstance(c, dict):
comments.append(
{
"qq_account": c.get("uin"),
"nickname": c.get("name"),
"content": c.get("content"),
"comment_tid": c.get("tid"),
"parent_tid": c.get("parent_tid"),
}
)
if not isinstance(c, dict):
continue
# Add the top-level comment
comments.append(
{
"qq_account": c.get("uin"),
"nickname": c.get("name"),
"content": c.get("content"),
"comment_tid": c.get("tid"),
"parent_tid": None, # 主评论没有父ID
}
)
# Check for and add second-level comments (replies)
if "list_3" in c and isinstance(c["list_3"], list):
for reply in c["list_3"]:
if not isinstance(reply, dict):
continue
comments.append(
{
"qq_account": reply.get("uin"),
"nickname": reply.get("name"),
"content": reply.get("content"),
"comment_tid": reply.get("tid"),
"parent_tid": c.get("tid"), # 父ID是主评论的ID
}
)
feeds_list.append(
{
@@ -791,6 +837,8 @@ class QZoneService:
"comments": comments,
}
)
logger.info(f"成功获取到 {len(feeds_list)} 条说说 from {t_qq} (使用统一JSON接口)")
return feeds_list
except Exception as e:
logger.error(f"获取说说列表失败: {e}", exc_info=True)

View File

@@ -13,6 +13,7 @@ from src.manager.async_task_manager import async_task_manager, AsyncTask
from src.plugin_system import EventType, BaseEventHandler
from src.plugin_system.apis import chat_api, person_api
from src.plugin_system.base.base_event import HandlerResult
from .proactive_thinker_executor import ProactiveThinkerExecutor
logger = get_logger(__name__)
@@ -26,6 +27,7 @@ class ColdStartTask(AsyncTask):
def __init__(self):
super().__init__(task_name="ColdStartTask")
self.chat_manager = get_chat_manager()
self.executor = ProactiveThinkerExecutor()
async def run(self):
"""任务主循环,周期性地检查是否有需要“破冰”的新用户。"""
@@ -70,9 +72,9 @@ class ColdStartTask(AsyncTask):
# [Key step] Proactively create the chat stream.
# Once created, the user enters the bot's "friend list"; ProactiveThinkingTask takes over from there
await self.chat_manager.get_or_create_stream(platform, user_info)
stream = await self.chat_manager.get_or_create_stream(platform, user_info)
# TODO: Call the LLM here to generate a natural, persona-consistent "ice-breaking" greeting and send it to the user.
await self.executor.execute(stream_id=stream.stream_id, start_mode="cold_start")
logger.info(f"【冷启动】已为新用户 {chat_id} (昵称: {user_nickname}) 创建聊天流并发送问候。")
except ValueError:
@@ -100,6 +102,7 @@ class ProactiveThinkingTask(AsyncTask):
def __init__(self):
super().__init__(task_name="ProactiveThinkingTask")
self.chat_manager = get_chat_manager()
self.executor = ProactiveThinkerExecutor()
def _get_next_interval(self) -> float:
"""
@@ -174,7 +177,7 @@ class ProactiveThinkingTask(AsyncTask):
if time_since_last_active > next_interval:
logger.info(f"【日常唤醒】聊天流 {stream.stream_id} 已冷却 {time_since_last_active:.2f} 秒,触发主动对话。")
# TODO: 在这里调用LLM生成一句自然的、符合上下文的问候语并发送。
await self.executor.execute(stream_id=stream.stream_id, start_mode="wake_up")
# [Key step] Update and persist the last-active time immediately after triggering.
# This prevents sending multiple messages to the same target within one check cycle due to unexpected delays.

View File

@@ -0,0 +1,284 @@
import orjson
from typing import Optional, Dict, Any
from datetime import datetime
from src.common.logger import get_logger
from src.plugin_system.apis import chat_api, person_api, schedule_api, send_api, llm_api, message_api, generator_api, database_api
from src.config.config import global_config, model_config
from src.person_info.person_info import get_person_info_manager
logger = get_logger(__name__)
class ProactiveThinkerExecutor:
"""
Proactive thinking executor V2
- Unified execution entry point
- Adds a decision module that decides whether and how to start a conversation
- Combines persona, schedule, and relationship information to generate more contextual conversations
"""
def __init__(self):
# Modules needed later (e.g. an LLM requester) can be initialized here
pass
async def execute(self, stream_id: str, start_mode: str = "wake_up"):
"""
Unified execution entry point
Args:
stream_id: chat stream ID
start_mode: start mode, 'cold_start' or 'wake_up'
"""
logger.info(f"开始为聊天流 {stream_id} 执行主动思考,模式: {start_mode}")
# 1. 信息收集
context = await self._gather_context(stream_id)
if not context:
return
# 2. Decision phase
decision_result = await self._make_decision(context, start_mode)
if not decision_result or not decision_result.get("should_reply"):
reason = decision_result.get("reason", "未提供") if decision_result else "决策过程返回None"
logger.info(f"决策结果为:不回复。原因: {reason}")
await database_api.store_action_info(
chat_stream=self._get_stream_from_id(stream_id),
action_name="proactive_decision",
action_prompt_display=f"主动思考决定不回复,原因: {reason}",
action_done = True,
action_data=decision_result
)
return
# 3. Planning and execution phase
topic = decision_result.get("topic", "say hello")
reason = decision_result.get("reason", "")
await database_api.store_action_info(
chat_stream=self._get_stream_from_id(stream_id),
action_name="proactive_decision",
action_prompt_display=f"主动思考决定回复,原因: {reason},话题:{topic}",
action_done = True,
action_data=decision_result
)
logger.info(f"决策结果为:回复。话题: {topic}")
plan_prompt = self._build_plan_prompt(context, start_mode, topic, reason)
is_success, response, _, _ = await llm_api.generate_with_model(prompt=plan_prompt, model_config=model_config.model_task_config.utils)
if is_success and response:
stream = self._get_stream_from_id(stream_id)
if stream:
# Use the message splitter to process and send the reply
reply_set = generator_api.process_human_text(response, enable_splitter=True, enable_chinese_typo=False)
for reply_type, content in reply_set:
if reply_type == "text":
await send_api.text_to_stream(stream_id=stream.stream_id, text=content)
else:
logger.warning(f"无法发送消息,因为找不到 stream_id 为 {stream_id} 的聊天流")
def _get_stream_from_id(self, stream_id: str):
"""根据stream_id解析并获取stream对象"""
try:
platform, chat_id, stream_type = stream_id.split(":")
if stream_type == "private":
return chat_api.ChatManager.get_private_stream_by_user_id(platform, chat_id)
elif stream_type == "group":
return chat_api.ChatManager.get_group_stream_by_group_id(platform, chat_id)
except Exception as e:
logger.error(f"解析 stream_id ({stream_id}) 或获取 stream 失败: {e}")
return None
async def _gather_context(self, stream_id: str) -> Optional[Dict[str, Any]]:
"""
Collect all the context needed to build the prompts
"""
stream = self._get_stream_from_id(stream_id)
if not stream:
logger.warning(f"无法找到 stream_id 为 {stream_id} 的聊天流")
return None
user_info = stream.user_info
if not user_info or not user_info.platform or not user_info.user_id:
logger.warning(f"Stream {stream_id} 的 user_info 不完整")
return None
person_id = person_api.get_person_id(user_info.platform, int(user_info.user_id))
person_info_manager = get_person_info_manager()
# Fetch today's schedule
schedules = await schedule_api.ScheduleAPI.get_today_schedule()
schedule_context = "\n".join([f"- {s['title']} ({s['start_time']}-{s['end_time']})" for s in schedules]) if schedules else "今天没有日程安排。"
# Fetch relationship information
short_impression = await person_info_manager.get_value(person_id, "short_impression") or ""
impression = await person_info_manager.get_value(person_id, "impression") or ""
attitude = await person_info_manager.get_value(person_id, "attitude") or 50
# Fetch recent chat history
recent_messages = await message_api.get_recent_messages(stream_id, limit=10)
recent_chat_history = await message_api.build_readable_messages_to_str(recent_messages) if recent_messages else ""
# Fetch recent action history
action_history = await database_api.db_query(
database_api.MODEL_MAPPING["ActionRecords"],
filters={"chat_id": stream_id, "action_name": "proactive_decision"},
limit=3,
order_by=["-time"]
)
action_history_context = ""
if isinstance(action_history, list):
action_history_context = "\n".join([f"- {a['action_data']}" for a in action_history if isinstance(a, dict)]) or ""
return {
"person_id": person_id,
"user_info": user_info,
"schedule_context": schedule_context,
"recent_chat_history": recent_chat_history,
"action_history_context": action_history_context,
"relationship": {
"short_impression": short_impression,
"impression": impression,
"attitude": attitude
},
"persona": {
"core": global_config.personality.personality_core,
"side": global_config.personality.personality_side,
"identity": global_config.personality.identity,
},
"current_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
}
async def _make_decision(self, context: Dict[str, Any], start_mode: str) -> Optional[Dict[str, Any]]:
"""
Decision module: decide whether to start a conversation proactively and what topic to bring up
"""
persona = context['persona']
user_info = context['user_info']
relationship = context['relationship']
prompt = f"""
# 角色
你的名字是{global_config.bot.nickname},你的人设如下:
- 核心人设: {persona['core']}
- 侧面人设: {persona['side']}
- 身份: {persona['identity']}
# 任务
现在是 {context['current_time']},你需要根据当前的情境,决定是否要主动向用户 '{user_info.user_nickname}' 发起对话。
# 情境分析
1. **启动模式**: {start_mode} ({'初次见面/很久未见' if start_mode == 'cold_start' else '日常唤醒'})
2. **你的日程**:
{context['schedule_context']}
3. **你和Ta的关系**:
- 简短印象: {relationship['short_impression']}
- 详细印象: {relationship['impression']}
- 好感度: {relationship['attitude']}/100
4. **最近的聊天摘要**:
{context['recent_chat_history']}
# 决策指令
请综合以上所有信息做出决策。你的决策需要以JSON格式输出包含以下字段
- `should_reply`: bool, 是否应该发起对话。
- `topic`: str, 如果 `should_reply` 为 true你打算聊什么话题(例如:问候一下今天的日程、关心一下昨天的某件事、分享一个你自己的趣事等)
- `reason`: str, 做出此决策的简要理由。
---
示例1 (应该回复):
{{
"should_reply": true,
"topic": "提醒Ta今天下午有'项目会议'的日程",
"reason": "现在是上午Ta下午有个重要会议我觉得应该主动提醒一下这会显得我很贴心。"
}}
示例2 (不应回复):
{{
"should_reply": false,
"topic": null,
"reason": "虽然我们的关系不错但现在是深夜而且Ta今天的日程都已经完成了我没有合适的理由去打扰Ta。"
}}
---
请输出你的决策:
"""
is_success, response, _, _ = await llm_api.generate_with_model(prompt=prompt, model_config=model_config.model_task_config.utils)
if not is_success:
return {"should_reply": False, "reason": "决策模型生成失败"}
try:
# Assume the LLM returns the decision result as JSON
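# If the model wraps the JSON in a Markdown code fence, orjson.loads raises
# JSONDecodeError here and the except branch below falls back to "do not reply".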
decision = orjson.loads(response)
return decision
except orjson.JSONDecodeError:
logger.error(f"决策LLM返回的JSON格式无效: {response}")
return {"should_reply": False, "reason": "决策模型返回格式错误"}
def _build_plan_prompt(self, context: Dict[str, Any], start_mode: str, topic: str, reason: str) -> str:
"""
Build the final planning prompt based on the start mode and the decided topic
"""
persona = context['persona']
user_info = context['user_info']
relationship = context['relationship']
if start_mode == "cold_start":
prompt = f"""
# 角色
你的名字是{global_config.bot.nickname},你的人设如下:
- 核心人设: {persona['core']}
- 侧面人设: {persona['side']}
- 身份: {persona['identity']}
# 任务
你需要主动向一个新朋友 '{user_info.user_nickname}' 发起对话。这是你们的第一次交流,或者很久没聊了。
# 决策上下文
- **决策理由**: {reason}
- **你和Ta的关系**:
- 简短印象: {relationship['short_impression']}
- 详细印象: {relationship['impression']}
- 好感度: {relationship['attitude']}/100
# 对话指引
- 你的目标是“破冰”,让对话自然地开始。
- 你应该围绕这个话题展开: {topic}
- 你的语气应该符合你的人设,友好且真诚。
- 直接输出你要说的第一句话,不要包含任何额外的前缀或解释。
"""
else: # wake_up
prompt = f"""
# 角色
你的名字是{global_config.bot.nickname},你的人设如下:
- 核心人设: {persona['core']}
- 侧面人设: {persona['side']}
- 身份: {persona['identity']}
# 任务
现在是 {context['current_time']},你需要主动向你的朋友 '{user_info.user_nickname}' 发起对话。
# 决策上下文
- **决策理由**: {reason}
# 情境分析
1. **你的日程**:
{context['schedule_context']}
2. **你和Ta的关系**:
- 详细印象: {relationship['impression']}
- 好感度: {relationship['attitude']}/100
3. **最近的聊天摘要**:
{context['recent_chat_history']}
4. **你最近的相关动作**:
{context['action_history_context']}
# 对话指引
- 你决定和Ta聊聊关于“{topic}”的话题。
- 请结合以上所有情境信息,自然地开启对话。
- 你的语气应该符合你的人设以及你对Ta的好感度。
- 直接输出你要说的第一句话,不要包含任何额外的前缀或解释。
"""
return prompt

View File

@@ -319,7 +319,7 @@ class SetEmojiLikeAction(BaseAction):
try:
success = await self.send_command(
command_name="set_msg_emoji_like",
command_name="set_emoji_like",
args={"message_id": message_id, "emoji_id": emoji_id, "set": set_like},
storage_message=False,
)