Merge branch 'dev' of https://github.com/A0000Xz/MaiBot into dev
@@ -119,7 +119,7 @@ class ExpressionLearner:
        min_len = min(len(s1), len(s2))
        if min_len < 5:
            return False
        same = sum(1 for a, b in zip(s1, s2) if a == b)
        same = sum(1 for a, b in zip(s1, s2, strict=False) if a == b)
        return same / min_len > 0.8

    async def learn_and_store_expression(self) -> List[Tuple[str, str, str]]:
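The hunk above flags two expressions as near-duplicates when at least 80% of their position-aligned characters match; zip(..., strict=False) keeps the truncating behaviour (no ValueError on unequal lengths). A minimal standalone sketch of the check (the helper name is hypothetical, not part of the repository):

def _looks_similar(s1: str, s2: str) -> bool:
    min_len = min(len(s1), len(s2))
    if min_len < 5:
        return False
    # strict=False: the longer string is silently truncated instead of raising
    same = sum(1 for a, b in zip(s1, s2, strict=False) if a == b)
    return same / min_len > 0.8

assert _looks_similar("今天天气真不错啊", "今天天气真不错呀")  # 7 of 8 aligned characters match
assert not _looks_similar("早上好", "晚上好")  # below the 5-character floor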
@@ -459,7 +459,7 @@ class HeartFChatting:
                    logger.debug(f"{self.log_prefix} 从action_data中获取系统命令: {command}")

                # 新增:消息计数和疲惫检查
                if action == "reply" and success:
                if action == "reply" and success and self.chat_stream.context.message.message_info.group_info:
                    self._message_count += 1
                    current_threshold = self._get_current_fatigue_threshold()
                    logger.info(
@@ -501,7 +501,7 @@ class HeartFChatting:
        Returns:
            int: 当前的疲惫阈值
        """
        return max(10, int(30 / global_config.chat.exit_focus_threshold))
        return max(10, int(30 * global_config.chat.exit_focus_threshold))

    def get_message_count_info(self) -> dict:
        """获取消息计数信息
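The second hunk above flips the fatigue-threshold formula from dividing by exit_focus_threshold to multiplying by it, keeping the floor of 10. A quick illustration with a made-up config value (0.5 is an example, not the project default):

exit_focus_threshold = 0.5
old_threshold = max(10, int(30 / exit_focus_threshold))  # 60: a smaller setting used to allow more replies before fatigue
new_threshold = max(10, int(30 * exit_focus_threshold))  # 15: a smaller setting now triggers fatigue sooner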
@@ -1,4 +1,3 @@
from src.manager.mood_manager import mood_manager
import enum


@@ -12,6 +11,3 @@ class ChatStateInfo:
    def __init__(self):
        self.chat_status: ChatState = ChatState.NORMAL
        self.current_state_time = 120

        self.mood_manager = mood_manager
        self.mood = self.mood_manager.get_mood_prompt()

@@ -1,5 +1,6 @@
from src.chat.memory_system.Hippocampus import hippocampus_manager
from src.config.config import global_config
import asyncio
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.storage import MessageStorage
from src.chat.heart_flow.heartflow import heartflow
@@ -13,6 +14,7 @@ import traceback
from typing import Tuple

from src.person_info.relationship_manager import get_relationship_manager
from src.mood.mood_manager import mood_manager


logger = get_logger("chat")
@@ -49,13 +51,12 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
    is_mentioned, _ = is_mentioned_bot_in_message(message)
    interested_rate = 0.0

    if global_config.memory.enable_memory:
        with Timer("记忆激活"):
            interested_rate = await hippocampus_manager.get_activate_from_text(
                message.processed_plain_text,
                fast_retrieval=True,
            )
            logger.debug(f"记忆激活率: {interested_rate:.2f}")
    with Timer("记忆激活"):
        interested_rate = await hippocampus_manager.get_activate_from_text(
            message.processed_plain_text,
            fast_retrieval=False,
        )
        logger.debug(f"记忆激活率: {interested_rate:.2f}")

    text_len = len(message.processed_plain_text)
    # 根据文本长度调整兴趣度,长度越大兴趣度越高,但增长率递减,最低0.01,最高0.05
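The comment above refers to a length-based base interest that is added to the memory-activation score: it grows with message length on a log10 curve and is clamped to [0.01, 0.05] (the same formula appears verbatim in the S4U variant of _calculate_interest further below). A small sketch of that curve:

import math

def base_interest(text_len: int) -> float:
    value = 0.01 + (0.05 - 0.01) * (math.log10(text_len + 1) / math.log10(1000 + 1))
    return min(max(value, 0.01), 0.05)

for n in (0, 10, 100, 1000):
    print(n, round(base_interest(n), 4))  # 0 -> 0.01, 1000 -> 0.05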
@@ -105,15 +106,19 @@ class HeartFCMessageReceiver:
                group_info=groupinfo,
            )

            interested_rate, is_mentioned = await _calculate_interest(message)
            message.interest_value = interested_rate

            await self.storage.store_message(message, chat)

            subheartflow = await heartflow.get_or_create_subheartflow(chat.stream_id)
            message.update_chat_stream(chat)

            # 6. 兴趣度计算与更新
            interested_rate, is_mentioned = await _calculate_interest(message)
            subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)

            chat_mood = mood_manager.get_mood_by_chat_id(subheartflow.chat_id)
            asyncio.create_task(chat_mood.update_mood_by_message(message, interested_rate))

            # 7. 日志记录
            mes_name = chat.group_info.group_name if chat.group_info else "私聊"
            # current_time = time.strftime("%H:%M:%S", time.localtime(message.message_info.time))
@@ -26,8 +26,6 @@ class SubHeartflow:

        Args:
            subheartflow_id: 子心流唯一标识符
            mai_states: 麦麦状态信息实例
            hfc_no_reply_callback: HFChatting 连续不回复时触发的回调
        """
        # 基础属性,两个值是一样的
        self.subheartflow_id = subheartflow_id
@@ -59,7 +59,7 @@ EMBEDDING_SIM_THRESHOLD = 0.99

def cosine_similarity(a, b):
    # 计算余弦相似度
    dot = sum(x * y for x, y in zip(a, b))
    dot = sum(x * y for x, y in zip(a, b, strict=False))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    if norm_a == 0 or norm_b == 0:
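cosine_similarity is paired with the EMBEDDING_SIM_THRESHOLD = 0.99 constant declared above; the call site is not shown in this hunk, so the following is only an illustrative sketch of how such a threshold is typically applied, with made-up vectors:

a = [0.1, 0.2, 0.3]
b = [0.1, 0.2, 0.301]
if cosine_similarity(a, b) > EMBEDDING_SIM_THRESHOLD:
    print("embeddings treated as near-duplicates")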
@@ -285,7 +285,7 @@ class EmbeddingStore:
        distances = list(distances.flatten())
        result = [
            (self.idx2hash[str(int(idx))], float(sim))
            for (idx, sim) in zip(indices, distances)
            for (idx, sim) in zip(indices, distances, strict=False)
            if idx in range(len(self.idx2hash))
        ]

@@ -205,7 +205,7 @@ class Hippocampus:
        # 从数据库加载记忆图
        self.entorhinal_cortex.sync_memory_from_db()
        # TODO: API-Adapter修改标记
        self.model_summary = LLMRequest(global_config.model.memory_summary, request_type="memory")
        self.model_summary = LLMRequest(global_config.model.memory, request_type="memory")

    def get_all_node_names(self) -> list:
        """获取记忆图中所有节点的名字列表"""
@@ -819,7 +819,7 @@ class EntorhinalCortex:
        timestamps = sample_scheduler.get_timestamp_array()
        # 使用 translate_timestamp_to_human_readable 并指定 mode="normal"
        readable_timestamps = [translate_timestamp_to_human_readable(ts, mode="normal") for ts in timestamps]
        for _, readable_timestamp in zip(timestamps, readable_timestamps):
        for _, readable_timestamp in zip(timestamps, readable_timestamps, strict=False):
            logger.debug(f"回忆往事: {readable_timestamp}")
        chat_samples = []
        for timestamp in timestamps:
@@ -3,7 +3,7 @@ import os
from typing import Dict, Any

from src.common.logger import get_logger
from src.manager.mood_manager import mood_manager  # 导入情绪管理器
from src.mood.mood_manager import mood_manager  # 导入情绪管理器
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.message_receive.message import MessageRecv
from src.experimental.only_message_process import MessageProcessor
@@ -113,6 +113,7 @@ class MessageRecv(Message):
        self.is_mentioned = None
        self.priority_mode = "interest"
        self.priority_info = None
        self.interest_value = None

    def update_chat_stream(self, chat_stream: "ChatStream"):
        self.chat_stream = chat_stream
@@ -337,6 +338,8 @@ class MessageSending(MessageProcessBase):
        # 用于显示发送内容与显示不一致的情况
        self.display_message = display_message

        self.interest_value = 0.0

    def build_reply(self):
        """设置回复消息"""
        if self.reply:
@@ -1,4 +1,5 @@
import re
import traceback
from typing import Union

# from ...common.database.database import db  # db is now Peewee's SqliteDatabase instance
@@ -36,11 +37,11 @@ class MessageStorage:
                filtered_display_message = re.sub(pattern, "", display_message, flags=re.DOTALL)
            else:
                filtered_display_message = ""

                interest_value = 0
                reply_to = message.reply_to
            else:
                filtered_display_message = ""

                interest_value = message.interest_value
                reply_to = ""

            chat_info_dict = chat_stream.to_dict()
@@ -85,9 +86,11 @@ class MessageStorage:
                processed_plain_text=filtered_processed_plain_text,
                display_message=filtered_display_message,
                memorized_times=message.memorized_times,
                interest_value=interest_value,
            )
        except Exception:
            logger.exception("存储消息失败")
            traceback.print_exc()

    @staticmethod
    async def store_recalled_message(message_id: str, time: str, chat_stream: ChatStream) -> None:
@@ -22,7 +22,7 @@ from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier

from src.chat.utils.utils import get_chat_type_and_target_info
from src.manager.mood_manager import mood_manager
from src.mood.mood_manager import mood_manager

willing_manager = get_willing_manager()

@@ -304,7 +304,9 @@ class NormalChat:

        semaphore = asyncio.Semaphore(5)

        async def process_and_acquire(msg_id, message, interest_value, is_mentioned):
        async def process_and_acquire(
            msg_id, message, interest_value, is_mentioned, semaphore=semaphore
        ):
            """处理单个兴趣消息并管理信号量"""
            async with semaphore:
                try:
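The NormalChat hunk above binds the semaphore as a default argument of the nested coroutine so each task holds an explicit reference while the semaphore caps concurrency. A minimal self-contained sketch of the pattern (names are illustrative, not repository API):

import asyncio

async def demo():
    semaphore = asyncio.Semaphore(5)

    async def process_and_acquire(msg_id, semaphore=semaphore):
        async with semaphore:          # at most 5 coroutines inside this block at once
            await asyncio.sleep(0.1)   # stand-in for the real message handling
            return msg_id

    results = await asyncio.gather(*(process_and_acquire(i) for i in range(20)))
    print(results)

asyncio.run(demo())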
@@ -299,7 +299,7 @@ class ActionModifier:
        task_results = await asyncio.gather(*tasks, return_exceptions=True)

        # 处理结果并更新缓存
        for _, (action_name, result) in enumerate(zip(task_names, task_results)):
        for _, (action_name, result) in enumerate(zip(task_names, task_results, strict=False)):
            if isinstance(result, Exception):
                logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}")
                results[action_name] = False
@@ -11,7 +11,12 @@ from json_repair import repair_json
|
||||
from src.chat.utils.utils import get_chat_type_and_target_info
|
||||
from datetime import datetime
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
|
||||
from src.chat.utils.chat_message_builder import (
|
||||
build_readable_actions,
|
||||
build_readable_messages,
|
||||
get_actions_by_timestamp_with_chat,
|
||||
get_raw_msg_before_timestamp_with_chat,
|
||||
)
|
||||
import time
|
||||
|
||||
logger = get_logger("planner")
|
||||
@@ -27,9 +32,14 @@ def init_prompt():
|
||||
你现在需要根据聊天内容,选择的合适的action来参与聊天。
|
||||
{chat_context_description},以下是具体的聊天内容:
|
||||
{chat_content_block}
|
||||
|
||||
|
||||
{moderation_prompt}
|
||||
|
||||
现在请你根据{by_what}选择合适的action:
|
||||
你刚刚选择并执行过的action是:
|
||||
{actions_before_now_block}
|
||||
|
||||
{no_action_block}
|
||||
{action_options_text}
|
||||
|
||||
@@ -222,6 +232,16 @@ class ActionPlanner:
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
actions_before_now = get_actions_by_timestamp_with_chat(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_end=time.time(),
|
||||
limit=5,
|
||||
)
|
||||
|
||||
actions_before_now_block = build_readable_actions(
|
||||
actions=actions_before_now,
|
||||
)
|
||||
|
||||
self.last_obs_time_mark = time.time()
|
||||
|
||||
if self.mode == "focus":
|
||||
@@ -285,6 +305,7 @@ class ActionPlanner:
|
||||
by_what=by_what,
|
||||
chat_context_description=chat_context_description,
|
||||
chat_content_block=chat_content_block,
|
||||
actions_before_now_block=actions_before_now_block,
|
||||
no_action_block=no_action_block,
|
||||
action_options_text=action_options_block,
|
||||
moderation_prompt=moderation_prompt_block,
|
||||
|
||||
@@ -18,7 +18,7 @@ from src.chat.utils.chat_message_builder import build_readable_messages, get_raw
|
||||
import time
|
||||
import asyncio
|
||||
from src.chat.express.expression_selector import expression_selector
|
||||
from src.manager.mood_manager import mood_manager
|
||||
from src.mood.mood_manager import mood_manager
|
||||
from src.person_info.relationship_fetcher import relationship_fetcher_manager
|
||||
import random
|
||||
import ast
|
||||
@@ -55,9 +55,9 @@ def init_prompt():
|
||||
{identity}
|
||||
|
||||
{action_descriptions}
|
||||
你正在{chat_target_2},现在请你读读之前的聊天记录,{mood_prompt},请你给出回复
|
||||
{config_expression_style}。
|
||||
请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,注意不要复读你说过的话。
|
||||
你正在{chat_target_2},你现在的心情是:{mood_state}
|
||||
现在请你读读之前的聊天记录,并给出回复
|
||||
{config_expression_style}。注意不要复读你说过的话
|
||||
{keywords_reaction_prompt}
|
||||
请注意不要输出多余内容(包括前后缀,冒号和引号,at或 @等 )。只输出回复内容。
|
||||
{moderation_prompt}
|
||||
@@ -504,6 +504,9 @@ class DefaultReplyer:
|
||||
reply_to = reply_data.get("reply_to", "none")
|
||||
extra_info_block = reply_data.get("extra_info", "") or reply_data.get("extra_info_block", "")
|
||||
|
||||
chat_mood = mood_manager.get_mood_by_chat_id(chat_id)
|
||||
mood_prompt = chat_mood.mood_state
|
||||
|
||||
sender, target = self._parse_reply_target(reply_to)
|
||||
|
||||
# 构建action描述 (如果启用planner)
|
||||
@@ -639,8 +642,6 @@ class DefaultReplyer:
|
||||
else:
|
||||
reply_target_block = ""
|
||||
|
||||
mood_prompt = mood_manager.get_mood_prompt()
|
||||
|
||||
prompt_info = await get_prompt_info(target, threshold=0.38)
|
||||
if prompt_info:
|
||||
prompt_info = await global_prompt_manager.format_prompt("knowledge_prompt", prompt_info=prompt_info)
|
||||
@@ -682,7 +683,7 @@ class DefaultReplyer:
|
||||
config_expression_style=global_config.expression.expression_style,
|
||||
action_descriptions=action_descriptions,
|
||||
chat_target_2=chat_target_2,
|
||||
mood_prompt=mood_prompt,
|
||||
mood_state=mood_prompt,
|
||||
)
|
||||
|
||||
return prompt
|
||||
@@ -774,8 +775,6 @@ class DefaultReplyer:
|
||||
else:
|
||||
reply_target_block = ""
|
||||
|
||||
mood_manager.get_mood_prompt()
|
||||
|
||||
if is_group_chat:
|
||||
chat_target_1 = await global_prompt_manager.get_prompt_async("chat_target_group1")
|
||||
chat_target_2 = await global_prompt_manager.get_prompt_async("chat_target_group2")
|
||||
@@ -975,7 +974,7 @@ def weighted_sample_no_replacement(items, weights, k) -> list:
    2. 不会重复选中同一个元素
    """
    selected = []
    pool = list(zip(items, weights))
    pool = list(zip(items, weights, strict=False))
    for _ in range(min(k, len(pool))):
        total = sum(w for _, w in pool)
        r = random.uniform(0, total)
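weighted_sample_no_replacement draws k items with probability proportional to their weights and never selects the same element twice. The loop body after `r = random.uniform(0, total)` is not shown in the hunk above, so the selection-and-removal step in this self-contained sketch is an assumption about how the routine proceeds, not a copy of the repository code:

import random

def weighted_sample_no_replacement(items, weights, k) -> list:
    selected = []
    pool = list(zip(items, weights, strict=False))
    for _ in range(min(k, len(pool))):
        total = sum(w for _, w in pool)
        r = random.uniform(0, total)
        cumulative = 0.0
        for i, (item, weight) in enumerate(pool):
            cumulative += weight
            if r <= cumulative:
                selected.append(item)
                pool.pop(i)  # drop the winner so it cannot be picked again
                break
    return selected

print(weighted_sample_no_replacement(["a", "b", "c"], [5, 1, 1], 2))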
@@ -77,6 +77,60 @@ def get_raw_msg_by_timestamp_with_chat_users(
|
||||
return find_messages(message_filter=filter_query, sort=sort_order, limit=limit, limit_mode=limit_mode)
|
||||
|
||||
|
||||
def get_actions_by_timestamp_with_chat(
|
||||
chat_id: str,
|
||||
timestamp_start: float = 0,
|
||||
timestamp_end: float = time.time(),
|
||||
limit: int = 0,
|
||||
limit_mode: str = "latest",
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""获取在特定聊天从指定时间戳到指定时间戳的动作记录,按时间升序排序,返回动作记录列表"""
|
||||
query = ActionRecords.select().where(
|
||||
(ActionRecords.chat_id == chat_id)
|
||||
& (ActionRecords.time > timestamp_start)
|
||||
& (ActionRecords.time < timestamp_end)
|
||||
)
|
||||
|
||||
if limit > 0:
|
||||
if limit_mode == "latest":
|
||||
query = query.order_by(ActionRecords.time.desc()).limit(limit)
|
||||
# 获取后需要反转列表,以保持最终输出为时间升序
|
||||
actions = list(query)
|
||||
return [action.__data__ for action in reversed(actions)]
|
||||
else: # earliest
|
||||
query = query.order_by(ActionRecords.time.asc()).limit(limit)
|
||||
else:
|
||||
query = query.order_by(ActionRecords.time.asc())
|
||||
|
||||
actions = list(query)
|
||||
return [action.__data__ for action in actions]
|
||||
|
||||
|
||||
def get_actions_by_timestamp_with_chat_inclusive(
|
||||
chat_id: str, timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""获取在特定聊天从指定时间戳到指定时间戳的动作记录(包含边界),按时间升序排序,返回动作记录列表"""
|
||||
query = ActionRecords.select().where(
|
||||
(ActionRecords.chat_id == chat_id)
|
||||
& (ActionRecords.time >= timestamp_start)
|
||||
& (ActionRecords.time <= timestamp_end)
|
||||
)
|
||||
|
||||
if limit > 0:
|
||||
if limit_mode == "latest":
|
||||
query = query.order_by(ActionRecords.time.desc()).limit(limit)
|
||||
# 获取后需要反转列表,以保持最终输出为时间升序
|
||||
actions = list(query)
|
||||
return [action.__data__ for action in reversed(actions)]
|
||||
else: # earliest
|
||||
query = query.order_by(ActionRecords.time.asc()).limit(limit)
|
||||
else:
|
||||
query = query.order_by(ActionRecords.time.asc())
|
||||
|
||||
actions = list(query)
|
||||
return [action.__data__ for action in actions]
|
||||
|
||||
|
||||
def get_raw_msg_by_timestamp_random(
|
||||
timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
|
||||
) -> List[Dict[str, Any]]:
|
||||
@@ -503,6 +557,45 @@ def build_pic_mapping_info(pic_id_mapping: Dict[str, str]) -> str:
|
||||
return "\n".join(mapping_lines)
|
||||
|
||||
|
||||
def build_readable_actions(actions: List[Dict[str, Any]]) -> str:
|
||||
"""
|
||||
将动作列表转换为可读的文本格式。
|
||||
格式: 在()分钟前,你使用了(action_name),具体内容是:(action_prompt_display)
|
||||
|
||||
Args:
|
||||
actions: 动作记录字典列表。
|
||||
|
||||
Returns:
|
||||
格式化的动作字符串。
|
||||
"""
|
||||
if not actions:
|
||||
return ""
|
||||
|
||||
output_lines = []
|
||||
current_time = time.time()
|
||||
|
||||
# The get functions return actions sorted ascending by time. Let's reverse it to show newest first.
|
||||
# sorted_actions = sorted(actions, key=lambda x: x.get("time", 0), reverse=True)
|
||||
|
||||
for action in actions:
|
||||
action_time = action.get("time", current_time)
|
||||
action_name = action.get("action_name", "未知动作")
|
||||
action_prompt_display = action.get("action_prompt_display", "无具体内容")
|
||||
|
||||
time_diff_seconds = current_time - action_time
|
||||
|
||||
if time_diff_seconds < 60:
|
||||
time_ago_str = f"在{int(time_diff_seconds)}秒前"
|
||||
else:
|
||||
time_diff_minutes = round(time_diff_seconds / 60)
|
||||
time_ago_str = f"在{int(time_diff_minutes)}分钟前"
|
||||
|
||||
line = f"{time_ago_str},你使用了“{action_name}”,具体内容是:“{action_prompt_display}”"
|
||||
output_lines.append(line)
|
||||
|
||||
return "\n".join(output_lines)
|
||||
|
||||
|
||||
async def build_readable_messages_with_list(
|
||||
messages: List[Dict[str, Any]],
|
||||
replace_bot_name: bool = True,
|
||||
|
||||
@@ -363,7 +363,7 @@ class ChineseTypoGenerator:
|
||||
else:
|
||||
# 处理多字词的单字替换
|
||||
word_result = []
|
||||
for _, (char, py) in enumerate(zip(word, word_pinyin)):
|
||||
for _, (char, py) in enumerate(zip(word, word_pinyin, strict=False)):
|
||||
# 词中的字替换概率降低
|
||||
word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))
|
||||
|
||||
|
||||
@@ -8,7 +8,8 @@ import numpy as np
|
||||
from maim_message import UserInfo
|
||||
|
||||
from src.common.logger import get_logger
|
||||
from src.manager.mood_manager import mood_manager
|
||||
|
||||
# from src.mood.mood_manager import mood_manager
|
||||
from ..message_receive.message import MessageRecv
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from .typo_generator import ChineseTypoGenerator
|
||||
@@ -412,12 +413,12 @@ def calculate_typing_time(
|
||||
- 在所有输入结束后,额外加上回车时间0.3秒
|
||||
- 如果is_emoji为True,将使用固定1秒的输入时间
|
||||
"""
|
||||
# 将0-1的唤醒度映射到-1到1
|
||||
mood_arousal = mood_manager.current_mood.arousal
|
||||
# 映射到0.5到2倍的速度系数
|
||||
typing_speed_multiplier = 1.5**mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
|
||||
chinese_time *= 1 / typing_speed_multiplier
|
||||
english_time *= 1 / typing_speed_multiplier
|
||||
# # 将0-1的唤醒度映射到-1到1
|
||||
# mood_arousal = mood_manager.current_mood.arousal
|
||||
# # 映射到0.5到2倍的速度系数
|
||||
# typing_speed_multiplier = 1.5**mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
|
||||
# chinese_time *= 1 / typing_speed_multiplier
|
||||
# english_time *= 1 / typing_speed_multiplier
|
||||
# 计算中文字符数
|
||||
chinese_chars = sum(1 for char in input_string if "\u4e00" <= char <= "\u9fff")
|
||||
|
||||
|
||||
@@ -33,9 +33,9 @@ class MemoryManager:
|
||||
self._id_map: Dict[str, MemoryItem] = {}
|
||||
|
||||
self.llm_summarizer = LLMRequest(
|
||||
model=global_config.model.focus_working_memory,
|
||||
model=global_config.model.memory,
|
||||
temperature=0.3,
|
||||
request_type="focus.processor.working_memory",
|
||||
request_type="working_memory",
|
||||
)
|
||||
|
||||
@property
|
||||
|
||||
@@ -129,6 +129,8 @@ class Messages(BaseModel):
|
||||
|
||||
reply_to = TextField(null=True)
|
||||
|
||||
interest_value = DoubleField(null=True)
|
||||
|
||||
# 从 chat_info 扁平化而来的字段
|
||||
chat_info_stream_id = TextField()
|
||||
chat_info_platform = TextField()
|
||||
|
||||
@@ -94,7 +94,7 @@ class ConfigBase:
|
||||
raise TypeError(
|
||||
f"Expected {len(field_type_args)} items for {field_type.__name__}, got {len(value)}"
|
||||
)
|
||||
return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args))
|
||||
return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args, strict=False))
|
||||
|
||||
if field_origin_type is dict:
|
||||
# 检查提供的value是否为dict
|
||||
|
||||
@@ -57,15 +57,10 @@ class RelationshipConfig(ConfigBase):
|
||||
"""关系配置类"""
|
||||
|
||||
enable_relationship: bool = True
|
||||
|
||||
give_name: bool = False
|
||||
"""是否给其他人取名"""
|
||||
|
||||
build_relationship_interval: int = 600
|
||||
"""构建关系间隔 单位秒,如果为0则不构建关系"""
|
||||
"""是否启用关系系统"""
|
||||
|
||||
relation_frequency: int = 1
|
||||
"""关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效"""
|
||||
"""关系频率,麦麦构建关系的速度"""
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -637,32 +632,20 @@ class ModelConfig(ConfigBase):
|
||||
replyer_2: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""normal_chat次要回复模型配置"""
|
||||
|
||||
memory_summary: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""记忆的概括模型配置"""
|
||||
memory: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""记忆模型配置"""
|
||||
|
||||
emotion: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""情绪模型配置"""
|
||||
|
||||
vlm: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""视觉语言模型配置"""
|
||||
|
||||
focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""专注工作记忆模型配置"""
|
||||
|
||||
tool_use: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""专注工具使用模型配置"""
|
||||
|
||||
planner: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""规划模型配置"""
|
||||
|
||||
relation: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""关系模型配置"""
|
||||
|
||||
embedding: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""嵌入模型配置"""
|
||||
|
||||
pfc_action_planner: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""PFC动作规划模型配置"""
|
||||
|
||||
pfc_chat: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""PFC聊天模型配置"""
|
||||
|
||||
pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
|
||||
"""PFC回复检查模型配置"""
|
||||
|
||||
13  src/main.py
@@ -6,7 +6,6 @@ from src.chat.express.exprssion_learner import get_expression_learner
|
||||
from src.common.remote import TelemetryHeartBeatTask
|
||||
from src.manager.async_task_manager import async_task_manager
|
||||
from src.chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
|
||||
from src.manager.mood_manager import MoodPrintTask, MoodUpdateTask
|
||||
from src.chat.emoji_system.emoji_manager import get_emoji_manager
|
||||
from src.chat.normal_chat.willing.willing_manager import get_willing_manager
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
@@ -17,6 +16,7 @@ from src.chat.message_receive.bot import chat_bot
|
||||
from src.common.logger import get_logger
|
||||
from src.individuality.individuality import get_individuality, Individuality
|
||||
from src.common.server import get_global_server, Server
|
||||
from src.mood.mood_manager import mood_manager
|
||||
from rich.traceback import install
|
||||
# from src.api.main import start_api_server
|
||||
|
||||
@@ -95,18 +95,15 @@ class MainSystem:
|
||||
get_emoji_manager().initialize()
|
||||
logger.info("表情包管理器初始化成功")
|
||||
|
||||
# 添加情绪衰减任务
|
||||
await async_task_manager.add_task(MoodUpdateTask())
|
||||
# 添加情绪打印任务
|
||||
await async_task_manager.add_task(MoodPrintTask())
|
||||
|
||||
logger.info("情绪管理器初始化成功")
|
||||
|
||||
# 启动愿望管理器
|
||||
await willing_manager.async_task_starter()
|
||||
|
||||
logger.info("willing管理器初始化成功")
|
||||
|
||||
# 启动情绪管理器
|
||||
await mood_manager.start()
|
||||
logger.info("情绪管理器初始化成功")
|
||||
|
||||
# 初始化聊天管理器
|
||||
|
||||
await get_chat_manager()._initialize()
|
||||
|
||||
242  src/mais4u/mais4u_chat/body_emotion_action_manager.py  Normal file
@@ -0,0 +1,242 @@
|
||||
import json
|
||||
import time
|
||||
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.manager.async_task_manager import AsyncTask, async_task_manager
|
||||
from json_repair import repair_json
|
||||
|
||||
logger = get_logger("action")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是群里正在进行的聊天记录
|
||||
|
||||
{indentify_block}
|
||||
你现在的动作状态是:
|
||||
- 手部:{hand_action}
|
||||
- 上半身:{upper_body_action}
|
||||
- 头部:{head_action}
|
||||
|
||||
现在,因为你发送了消息,或者群里其他人发送了消息,引起了你的注意,你对其进行了阅读和思考,请你更新你的动作状态。
|
||||
请只按照以下json格式输出,描述你新的动作状态,每个动作一到三个中文词,确保每个字段都存在:
|
||||
{{
|
||||
"hand_action": "...",
|
||||
"upper_body_action": "...",
|
||||
"head_action": "..."
|
||||
}}
|
||||
""",
|
||||
"change_action_prompt",
|
||||
)
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是群里最近的聊天记录
|
||||
|
||||
{indentify_block}
|
||||
你之前的动作状态是:
|
||||
- 手部:{hand_action}
|
||||
- 上半身:{upper_body_action}
|
||||
- 头部:{head_action}
|
||||
|
||||
距离你上次关注群里消息已经过去了一段时间,你冷静了下来,你的动作会趋于平缓或静止,请你输出你现在新的动作状态,用中文。
|
||||
请只按照以下json格式输出,描述你新的动作状态,每个动作一到三个词,确保每个字段都存在:
|
||||
{{
|
||||
"hand_action": "...",
|
||||
"upper_body_action": "...",
|
||||
"head_action": "..."
|
||||
}}
|
||||
""",
|
||||
"regress_action_prompt",
|
||||
)
|
||||
|
||||
|
||||
class ChatAction:
|
||||
def __init__(self, chat_id: str):
|
||||
self.chat_id: str = chat_id
|
||||
self.hand_action: str = "双手放在桌面"
|
||||
self.upper_body_action: str = "坐着"
|
||||
self.head_action: str = "注视摄像机"
|
||||
|
||||
self.regression_count: int = 0
|
||||
|
||||
self.action_model = LLMRequest(
|
||||
model=global_config.model.emotion,
|
||||
temperature=0.7,
|
||||
request_type="action",
|
||||
)
|
||||
|
||||
self.last_change_time = 0
|
||||
|
||||
async def update_action_by_message(self, message: MessageRecv):
|
||||
self.regression_count = 0
|
||||
|
||||
message_time = message.message_info.time
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"change_action_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
hand_action=self.hand_action,
|
||||
upper_body_action=self.upper_body_action,
|
||||
head_action=self.head_action,
|
||||
)
|
||||
|
||||
logger.info(f"prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"response: {response}")
|
||||
logger.info(f"reasoning_content: {reasoning_content}")
|
||||
|
||||
action_data = json.loads(repair_json(response))
|
||||
|
||||
if action_data:
|
||||
self.hand_action = action_data.get("hand_action", self.hand_action)
|
||||
self.upper_body_action = action_data.get("upper_body_action", self.upper_body_action)
|
||||
self.head_action = action_data.get("head_action", self.head_action)
|
||||
|
||||
self.last_change_time = message_time
|
||||
|
||||
async def regress_action(self):
|
||||
message_time = time.time()
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"regress_action_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
hand_action=self.hand_action,
|
||||
upper_body_action=self.upper_body_action,
|
||||
head_action=self.head_action,
|
||||
)
|
||||
|
||||
logger.info(f"prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"response: {response}")
|
||||
logger.info(f"reasoning_content: {reasoning_content}")
|
||||
|
||||
action_data = json.loads(repair_json(response))
|
||||
if action_data:
|
||||
self.hand_action = action_data.get("hand_action", self.hand_action)
|
||||
self.upper_body_action = action_data.get("upper_body_action", self.upper_body_action)
|
||||
self.head_action = action_data.get("head_action", self.head_action)
|
||||
|
||||
self.regression_count += 1
|
||||
|
||||
|
||||
class ActionRegressionTask(AsyncTask):
|
||||
def __init__(self, action_manager: "ActionManager"):
|
||||
super().__init__(task_name="ActionRegressionTask", run_interval=30)
|
||||
self.action_manager = action_manager
|
||||
|
||||
async def run(self):
|
||||
logger.debug("Running action regression task...")
|
||||
now = time.time()
|
||||
for action_state in self.action_manager.action_state_list:
|
||||
if action_state.last_change_time == 0:
|
||||
continue
|
||||
|
||||
if now - action_state.last_change_time > 180:
|
||||
if action_state.regression_count >= 3:
|
||||
continue
|
||||
|
||||
logger.info(f"chat {action_state.chat_id} 开始动作回归, 这是第 {action_state.regression_count + 1} 次")
|
||||
await action_state.regress_action()
|
||||
|
||||
|
||||
class ActionManager:
|
||||
def __init__(self):
|
||||
self.action_state_list: list[ChatAction] = []
|
||||
"""当前动作状态"""
|
||||
self.task_started: bool = False
|
||||
|
||||
async def start(self):
|
||||
"""启动动作回归后台任务"""
|
||||
if self.task_started:
|
||||
return
|
||||
|
||||
logger.info("启动动作回归任务...")
|
||||
task = ActionRegressionTask(self)
|
||||
await async_task_manager.add_task(task)
|
||||
self.task_started = True
|
||||
logger.info("动作回归任务已启动")
|
||||
|
||||
def get_action_state_by_chat_id(self, chat_id: str) -> ChatAction:
|
||||
for action_state in self.action_state_list:
|
||||
if action_state.chat_id == chat_id:
|
||||
return action_state
|
||||
|
||||
new_action_state = ChatAction(chat_id)
|
||||
self.action_state_list.append(new_action_state)
|
||||
return new_action_state
|
||||
|
||||
def reset_action_state_by_chat_id(self, chat_id: str):
|
||||
for action_state in self.action_state_list:
|
||||
if action_state.chat_id == chat_id:
|
||||
action_state.hand_action = "双手放在桌面"
|
||||
action_state.upper_body_action = "坐着"
|
||||
action_state.head_action = "注视摄像机"
|
||||
action_state.regression_count = 0
|
||||
return
|
||||
self.action_state_list.append(ChatAction(chat_id))
|
||||
|
||||
|
||||
init_prompt()
|
||||
|
||||
action_manager = ActionManager()
|
||||
"""全局动作管理器"""
|
||||
363  src/mais4u/mais4u_chat/s4u_mood_manager.py  Normal file
@@ -0,0 +1,363 @@
|
||||
import asyncio
|
||||
import json
|
||||
import time
|
||||
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.manager.async_task_manager import AsyncTask, async_task_manager
|
||||
from src.plugin_system.apis import send_api
|
||||
|
||||
logger = get_logger("mood")
|
||||
|
||||
|
||||
async def send_joy_action(chat_id: str):
|
||||
action_content = {"action": "Joy_eye", "data": 1.0}
|
||||
await send_api.custom_to_stream(message_type="face_emotion", content=action_content, stream_id=chat_id)
|
||||
logger.info(f"[{chat_id}] 已发送 Joy 动作: {action_content}")
|
||||
|
||||
await asyncio.sleep(5.0)
|
||||
|
||||
end_action_content = {"action": "Joy_eye", "data": 0.0}
|
||||
await send_api.custom_to_stream(message_type="face_emotion", content=end_action_content, stream_id=chat_id)
|
||||
logger.info(f"[{chat_id}] 已发送 Joy 结束动作: {end_action_content}")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是直播间里正在进行的对话
|
||||
|
||||
{indentify_block}
|
||||
你刚刚的情绪状态是:{mood_state}
|
||||
|
||||
现在,发送了消息,引起了你的注意,你对其进行了阅读和思考,请你输出一句话描述你新的情绪状态,不要输出任何其他内容
|
||||
请只输出情绪状态,不要输出其他内容:
|
||||
""",
|
||||
"change_mood_prompt",
|
||||
)
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是直播间里最近的对话
|
||||
|
||||
{indentify_block}
|
||||
你之前的情绪状态是:{mood_state}
|
||||
|
||||
距离你上次关注直播间消息已经过去了一段时间,你冷静了下来,请你输出一句话描述你现在的情绪状态
|
||||
请只输出情绪状态,不要输出其他内容:
|
||||
""",
|
||||
"regress_mood_prompt",
|
||||
)
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是直播间里正在进行的对话
|
||||
|
||||
{indentify_block}
|
||||
你刚刚的情绪状态是:{mood_state}
|
||||
具体来说,从1-10分,你的情绪状态是:
|
||||
喜(Joy): {joy}
|
||||
怒(Anger): {anger}
|
||||
哀(Sorrow): {sorrow}
|
||||
乐(Pleasure): {pleasure}
|
||||
惧(Fear): {fear}
|
||||
|
||||
现在,发送了消息,引起了你的注意,你对其进行了阅读和思考。请基于对话内容,评估你新的情绪状态。
|
||||
请以JSON格式输出你新的情绪状态,包含“喜怒哀乐惧”五个维度,每个维度的取值范围为1-10。
|
||||
键值请使用英文: "joy", "anger", "sorrow", "pleasure", "fear".
|
||||
例如: {{"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}}
|
||||
不要输出任何其他内容,只输出JSON。
|
||||
""",
|
||||
"change_mood_numerical_prompt",
|
||||
)
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是直播间里最近的对话
|
||||
|
||||
{indentify_block}
|
||||
你之前的情绪状态是:{mood_state}
|
||||
具体来说,从1-10分,你的情绪状态是:
|
||||
喜(Joy): {joy}
|
||||
怒(Anger): {anger}
|
||||
哀(Sorrow): {sorrow}
|
||||
乐(Pleasure): {pleasure}
|
||||
惧(Fear): {fear}
|
||||
|
||||
距离你上次关注直播间消息已经过去了一段时间,你冷静了下来。请基于此,评估你现在的情绪状态。
|
||||
请以JSON格式输出你新的情绪状态,包含“喜怒哀乐惧”五个维度,每个维度的取值范围为1-10。
|
||||
键值请使用英文: "joy", "anger", "sorrow", "pleasure", "fear".
|
||||
例如: {{"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}}
|
||||
不要输出任何其他内容,只输出JSON。
|
||||
""",
|
||||
"regress_mood_numerical_prompt",
|
||||
)
|
||||
|
||||
|
||||
class ChatMood:
|
||||
def __init__(self, chat_id: str):
|
||||
self.chat_id: str = chat_id
|
||||
self.mood_state: str = "感觉很平静"
|
||||
self.mood_values: dict[str, int] = {"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}
|
||||
|
||||
self.regression_count: int = 0
|
||||
|
||||
self.mood_model = LLMRequest(
|
||||
model=global_config.model.emotion,
|
||||
temperature=0.7,
|
||||
request_type="mood_text",
|
||||
)
|
||||
self.mood_model_numerical = LLMRequest(
|
||||
model=global_config.model.emotion,
|
||||
temperature=0.4,
|
||||
request_type="mood_numerical",
|
||||
)
|
||||
|
||||
self.last_change_time = 0
|
||||
|
||||
def _parse_numerical_mood(self, response: str) -> dict[str, int] | None:
|
||||
try:
|
||||
# The LLM might output markdown with json inside
|
||||
if "```json" in response:
|
||||
response = response.split("```json")[1].split("```")[0]
|
||||
elif "```" in response:
|
||||
response = response.split("```")[1].split("```")[0]
|
||||
|
||||
data = json.loads(response)
|
||||
|
||||
# Validate
|
||||
required_keys = {"joy", "anger", "sorrow", "pleasure", "fear"}
|
||||
if not required_keys.issubset(data.keys()):
|
||||
logger.warning(f"Numerical mood response missing keys: {response}")
|
||||
return None
|
||||
|
||||
for key in required_keys:
|
||||
value = data[key]
|
||||
if not isinstance(value, int) or not (1 <= value <= 10):
|
||||
logger.warning(f"Numerical mood response invalid value for {key}: {value} in {response}")
|
||||
return None
|
||||
|
||||
return {key: data[key] for key in required_keys}
|
||||
|
||||
except json.JSONDecodeError:
|
||||
logger.warning(f"Failed to parse numerical mood JSON: {response}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing numerical mood: {e}, response: {response}")
|
||||
return None
|
||||
|
||||
async def update_mood_by_message(self, message: MessageRecv):
|
||||
self.regression_count = 0
|
||||
|
||||
message_time = message.message_info.time
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
async def _update_text_mood():
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"change_mood_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
)
|
||||
logger.debug(f"text mood prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"text mood response: {response}")
|
||||
logger.debug(f"text mood reasoning_content: {reasoning_content}")
|
||||
return response
|
||||
|
||||
async def _update_numerical_mood():
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"change_mood_numerical_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
joy=self.mood_values["joy"],
|
||||
anger=self.mood_values["anger"],
|
||||
sorrow=self.mood_values["sorrow"],
|
||||
pleasure=self.mood_values["pleasure"],
|
||||
fear=self.mood_values["fear"],
|
||||
)
|
||||
logger.info(f"numerical mood prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
|
||||
prompt=prompt
|
||||
)
|
||||
logger.info(f"numerical mood response: {response}")
|
||||
logger.debug(f"numerical mood reasoning_content: {reasoning_content}")
|
||||
return self._parse_numerical_mood(response)
|
||||
|
||||
results = await asyncio.gather(_update_text_mood(), _update_numerical_mood())
|
||||
text_mood_response, numerical_mood_response = results
|
||||
|
||||
if text_mood_response:
|
||||
self.mood_state = text_mood_response
|
||||
|
||||
if numerical_mood_response:
|
||||
self.mood_values = numerical_mood_response
|
||||
if self.mood_values.get("joy", 0) > 5:
|
||||
asyncio.create_task(send_joy_action(self.chat_id))
|
||||
|
||||
self.last_change_time = message_time
|
||||
|
||||
async def regress_mood(self):
|
||||
message_time = time.time()
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
async def _regress_text_mood():
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"regress_mood_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
)
|
||||
logger.debug(f"text regress prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"text regress response: {response}")
|
||||
logger.debug(f"text regress reasoning_content: {reasoning_content}")
|
||||
return response
|
||||
|
||||
async def _regress_numerical_mood():
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"regress_mood_numerical_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
joy=self.mood_values["joy"],
|
||||
anger=self.mood_values["anger"],
|
||||
sorrow=self.mood_values["sorrow"],
|
||||
pleasure=self.mood_values["pleasure"],
|
||||
fear=self.mood_values["fear"],
|
||||
)
|
||||
logger.debug(f"numerical regress prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
|
||||
prompt=prompt
|
||||
)
|
||||
logger.info(f"numerical regress response: {response}")
|
||||
logger.debug(f"numerical regress reasoning_content: {reasoning_content}")
|
||||
return self._parse_numerical_mood(response)
|
||||
|
||||
results = await asyncio.gather(_regress_text_mood(), _regress_numerical_mood())
|
||||
text_mood_response, numerical_mood_response = results
|
||||
|
||||
if text_mood_response:
|
||||
self.mood_state = text_mood_response
|
||||
|
||||
if numerical_mood_response:
|
||||
self.mood_values = numerical_mood_response
|
||||
if self.mood_values.get("joy", 0) > 5:
|
||||
asyncio.create_task(send_joy_action(self.chat_id))
|
||||
|
||||
self.regression_count += 1
|
||||
|
||||
|
||||
class MoodRegressionTask(AsyncTask):
|
||||
def __init__(self, mood_manager: "MoodManager"):
|
||||
super().__init__(task_name="MoodRegressionTask", run_interval=30)
|
||||
self.mood_manager = mood_manager
|
||||
|
||||
async def run(self):
|
||||
logger.debug("Running mood regression task...")
|
||||
now = time.time()
|
||||
for mood in self.mood_manager.mood_list:
|
||||
if mood.last_change_time == 0:
|
||||
continue
|
||||
|
||||
if now - mood.last_change_time > 180:
|
||||
if mood.regression_count >= 3:
|
||||
continue
|
||||
|
||||
logger.info(f"chat {mood.chat_id} 开始情绪回归, 这是第 {mood.regression_count + 1} 次")
|
||||
await mood.regress_mood()
|
||||
|
||||
|
||||
class MoodManager:
|
||||
def __init__(self):
|
||||
self.mood_list: list[ChatMood] = []
|
||||
"""当前情绪状态"""
|
||||
self.task_started: bool = False
|
||||
|
||||
async def start(self):
|
||||
"""启动情绪回归后台任务"""
|
||||
if self.task_started:
|
||||
return
|
||||
|
||||
logger.info("启动情绪回归任务...")
|
||||
task = MoodRegressionTask(self)
|
||||
await async_task_manager.add_task(task)
|
||||
self.task_started = True
|
||||
logger.info("情绪回归任务已启动")
|
||||
|
||||
def get_mood_by_chat_id(self, chat_id: str) -> ChatMood:
|
||||
for mood in self.mood_list:
|
||||
if mood.chat_id == chat_id:
|
||||
return mood
|
||||
|
||||
new_mood = ChatMood(chat_id)
|
||||
self.mood_list.append(new_mood)
|
||||
return new_mood
|
||||
|
||||
def reset_mood_by_chat_id(self, chat_id: str):
|
||||
for mood in self.mood_list:
|
||||
if mood.chat_id == chat_id:
|
||||
mood.mood_state = "感觉很平静"
|
||||
mood.regression_count = 0
|
||||
return
|
||||
self.mood_list.append(ChatMood(chat_id))
|
||||
|
||||
|
||||
init_prompt()
|
||||
|
||||
mood_manager = MoodManager()
|
||||
"""全局情绪管理器"""
|
||||
@@ -1,7 +1,18 @@
|
||||
import asyncio
|
||||
import math
|
||||
from typing import Tuple
|
||||
|
||||
from src.chat.memory_system.Hippocampus import hippocampus_manager
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.chat.message_receive.storage import MessageStorage
|
||||
from src.chat.message_receive.chat_stream import get_chat_manager
|
||||
from src.chat.utils.timer_calculator import Timer
|
||||
from src.chat.utils.utils import is_mentioned_bot_in_message
|
||||
from src.common.logger import get_logger
|
||||
from src.config.config import global_config
|
||||
from src.mais4u.mais4u_chat.body_emotion_action_manager import action_manager
|
||||
from src.mais4u.mais4u_chat.s4u_mood_manager import mood_manager
|
||||
|
||||
from .s4u_chat import get_s4u_chat_manager
|
||||
|
||||
|
||||
@@ -10,6 +21,42 @@ from .s4u_chat import get_s4u_chat_manager
|
||||
logger = get_logger("chat")
|
||||
|
||||
|
||||
async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
|
||||
"""计算消息的兴趣度
|
||||
|
||||
Args:
|
||||
message: 待处理的消息对象
|
||||
|
||||
Returns:
|
||||
Tuple[float, bool]: (兴趣度, 是否被提及)
|
||||
"""
|
||||
is_mentioned, _ = is_mentioned_bot_in_message(message)
|
||||
interested_rate = 0.0
|
||||
|
||||
if global_config.memory.enable_memory:
|
||||
with Timer("记忆激活"):
|
||||
interested_rate = await hippocampus_manager.get_activate_from_text(
|
||||
message.processed_plain_text,
|
||||
fast_retrieval=True,
|
||||
)
|
||||
logger.debug(f"记忆激活率: {interested_rate:.2f}")
|
||||
|
||||
text_len = len(message.processed_plain_text)
|
||||
# 根据文本长度调整兴趣度,长度越大兴趣度越高,但增长率递减,最低0.01,最高0.05
|
||||
# 采用对数函数实现递减增长
|
||||
|
||||
base_interest = 0.01 + (0.05 - 0.01) * (math.log10(text_len + 1) / math.log10(1000 + 1))
|
||||
base_interest = min(max(base_interest, 0.01), 0.05)
|
||||
|
||||
interested_rate += base_interest
|
||||
|
||||
if is_mentioned:
|
||||
interest_increase_on_mention = 1
|
||||
interested_rate += interest_increase_on_mention
|
||||
|
||||
return interested_rate, is_mentioned
|
||||
|
||||
|
||||
class S4UMessageProcessor:
|
||||
"""心流处理器,负责处理接收到的消息并计算兴趣度"""
|
||||
|
||||
@@ -53,5 +100,13 @@ class S4UMessageProcessor:
|
||||
else:
|
||||
await s4u_chat.add_message(message)
|
||||
|
||||
interested_rate, _ = await _calculate_interest(message)
|
||||
|
||||
chat_mood = mood_manager.get_mood_by_chat_id(chat.stream_id)
|
||||
asyncio.create_task(chat_mood.update_mood_by_message(message))
|
||||
chat_action = action_manager.get_action_state_by_chat_id(chat.stream_id)
|
||||
asyncio.create_task(chat_action.update_action_by_message(message))
|
||||
# asyncio.create_task(chat_action.update_facial_expression_by_message(message, interested_rate))
|
||||
|
||||
# 7. 日志记录
|
||||
logger.info(f"[S4U]{userinfo.user_nickname}:{message.processed_plain_text}")
|
||||
|
||||
@@ -17,11 +17,6 @@ logger = get_logger("prompt")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
Prompt("你正在qq群里聊天,下面是群里在聊的内容:", "chat_target_group1")
|
||||
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
|
||||
Prompt("在群里聊天", "chat_target_group2")
|
||||
Prompt("和{sender_name}私聊", "chat_target_private2")
|
||||
|
||||
Prompt("\n你有以下这些**知识**:\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
|
||||
Prompt("\n关于你们的关系,你需要知道:\n{relation_info}\n", "relation_prompt")
|
||||
Prompt("你回想起了一些事情:\n{memory_info}\n", "memory_prompt")
|
||||
@@ -252,7 +247,7 @@ def weighted_sample_no_replacement(items, weights, k) -> list:
|
||||
2. 不会重复选中同一个元素
|
||||
"""
|
||||
selected = []
|
||||
pool = list(zip(items, weights))
|
||||
pool = list(zip(items, weights, strict=False))
|
||||
for _ in range(min(k, len(pool))):
|
||||
total = sum(w for _, w in pool)
|
||||
r = random.uniform(0, total)
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import os
|
||||
from typing import AsyncGenerator
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from src.mais4u.openai_client import AsyncOpenAIClient
|
||||
from src.config.config import global_config
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
@@ -36,7 +35,6 @@ class S4UStreamGenerator:
|
||||
raise ValueError("`replyer_1` 在配置文件中缺少 `model_name` 字段")
|
||||
self.replyer_1_config = replyer_1_config
|
||||
|
||||
self.model_sum = LLMRequest(model=global_config.model.memory_summary, temperature=0.7, request_type="relation")
|
||||
self.current_model_name = "unknown model"
|
||||
self.partial_response = ""
|
||||
|
||||
|
||||
@@ -1,296 +0,0 @@
|
||||
import asyncio
|
||||
import math
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Tuple
|
||||
|
||||
from ..config.config import global_config
|
||||
from ..common.logger import get_logger
|
||||
from ..manager.async_task_manager import AsyncTask
|
||||
from ..individuality.individuality import get_individuality
|
||||
|
||||
logger = get_logger("mood")
|
||||
|
||||
|
||||
@dataclass
|
||||
class MoodState:
|
||||
valence: float
|
||||
"""愉悦度 (-1.0 到 1.0),-1表示极度负面,1表示极度正面"""
|
||||
arousal: float
|
||||
"""唤醒度 (-1.0 到 1.0),-1表示抑制,1表示兴奋"""
|
||||
text: str
|
||||
"""心情的文本描述"""
|
||||
|
||||
|
||||
@dataclass
|
||||
class MoodChangeHistory:
|
||||
valence_direction_factor: int
|
||||
"""愉悦度变化的系数(正为增益,负为抑制)"""
|
||||
arousal_direction_factor: int
|
||||
"""唤醒度变化的系数(正为增益,负为抑制)"""
|
||||
|
||||
|
||||
class MoodUpdateTask(AsyncTask):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
task_name="Mood Update Task",
|
||||
wait_before_start=global_config.mood.mood_update_interval,
|
||||
run_interval=global_config.mood.mood_update_interval,
|
||||
)
|
||||
|
||||
# 从配置文件获取衰减率
|
||||
self.decay_rate_valence: float = 1 - global_config.mood.mood_decay_rate
|
||||
"""愉悦度衰减率"""
|
||||
self.decay_rate_arousal: float = 1 - global_config.mood.mood_decay_rate
|
||||
"""唤醒度衰减率"""
|
||||
|
||||
self.last_update = time.time()
|
||||
"""上次更新时间"""
|
||||
|
||||
async def run(self):
|
||||
current_time = time.time()
|
||||
time_diff = current_time - self.last_update
|
||||
agreeableness_factor = 1 # 宜人性系数
|
||||
agreeableness_bias = 0 # 宜人性偏置
|
||||
neuroticism_factor = 0.5 # 神经质系数
|
||||
# 获取人格特质
|
||||
personality = get_individuality().personality
|
||||
if personality:
|
||||
# 神经质:影响情绪变化速度
|
||||
neuroticism_factor = 1 + (personality.neuroticism - 0.5) * 0.4
|
||||
agreeableness_factor = 1 + (personality.agreeableness - 0.5) * 0.4
|
||||
|
||||
# 宜人性:影响情绪基准线
|
||||
if personality.agreeableness < 0.2:
|
||||
agreeableness_bias = (personality.agreeableness - 0.2) * 0.5
|
||||
elif personality.agreeableness > 0.8:
|
||||
agreeableness_bias = (personality.agreeableness - 0.8) * 0.5
|
||||
else:
|
||||
agreeableness_bias = 0
|
||||
|
||||
# 分别计算正向和负向的衰减率
|
||||
if mood_manager.current_mood.valence >= 0:
|
||||
# 正向情绪衰减
|
||||
decay_rate_positive = self.decay_rate_valence * (1 / agreeableness_factor)
|
||||
valence_target = 0 + agreeableness_bias
|
||||
new_valence = valence_target + (mood_manager.current_mood.valence - valence_target) * math.exp(
|
||||
-decay_rate_positive * time_diff * neuroticism_factor
|
||||
)
|
||||
else:
|
||||
# 负向情绪衰减
|
||||
decay_rate_negative = self.decay_rate_valence * agreeableness_factor
|
||||
valence_target = 0 + agreeableness_bias
|
||||
new_valence = valence_target + (mood_manager.current_mood.valence - valence_target) * math.exp(
|
||||
-decay_rate_negative * time_diff * neuroticism_factor
|
||||
)
|
||||
|
||||
# Arousal 向中性(0)回归
|
||||
arousal_target = 0
|
||||
new_arousal = arousal_target + (mood_manager.current_mood.arousal - arousal_target) * math.exp(
|
||||
-self.decay_rate_arousal * time_diff * neuroticism_factor
|
||||
)
|
||||
|
||||
mood_manager.set_current_mood(new_valence, new_arousal)
|
||||
|
||||
self.last_update = current_time
|
||||
|
||||
|
||||
class MoodPrintTask(AsyncTask):
|
||||
def __init__(self):
|
||||
super().__init__(
|
||||
task_name="Mood Print Task",
|
||||
wait_before_start=60,
|
||||
run_interval=60,
|
||||
)
|
||||
|
||||
async def run(self):
|
||||
# 打印当前心情
|
||||
logger.info(
|
||||
f"愉悦度: {mood_manager.current_mood.valence:.2f}, "
|
||||
f"唤醒度: {mood_manager.current_mood.arousal:.2f}, "
|
||||
f"心情: {mood_manager.current_mood.text}"
|
||||
)
|
||||
|
||||
|
||||
class MoodManager:
|
||||
# TODO: 改进,使用具有实验支持的新情绪模型
|
||||
|
||||
EMOTION_FACTOR_MAP: Dict[str, Tuple[float, float]] = {
|
||||
"开心": (0.21, 0.6),
|
||||
"害羞": (0.15, 0.2),
|
||||
"愤怒": (-0.24, 0.8),
|
||||
"恐惧": (-0.21, 0.7),
|
||||
"悲伤": (-0.21, 0.3),
|
||||
"厌恶": (-0.12, 0.4),
|
||||
"惊讶": (0.06, 0.7),
|
||||
"困惑": (0.0, 0.6),
|
||||
"平静": (0.03, 0.5),
|
||||
}
|
||||
"""
|
||||
情绪词映射表 {mood: (valence, arousal)}
|
||||
将情绪描述词映射到愉悦度和唤醒度的元组
|
||||
"""
|
||||
|
||||
EMOTION_POINT_MAP: Dict[Tuple[float, float], str] = {
|
||||
# 第一象限:高唤醒,正愉悦
|
||||
(0.5, 0.4): "兴奋",
|
||||
(0.3, 0.6): "快乐",
|
||||
(0.2, 0.3): "满足",
|
||||
# 第二象限:高唤醒,负愉悦
|
||||
(-0.5, 0.4): "愤怒",
|
||||
(-0.3, 0.6): "焦虑",
|
||||
(-0.2, 0.3): "烦躁",
|
||||
# 第三象限:低唤醒,负愉悦
|
||||
(-0.5, -0.4): "悲伤",
|
||||
(-0.3, -0.3): "疲倦",
|
||||
(-0.4, -0.7): "疲倦",
|
||||
# 第四象限:低唤醒,正愉悦
|
||||
(0.2, -0.1): "平静",
|
||||
(0.3, -0.2): "安宁",
|
||||
(0.5, -0.4): "放松",
|
||||
}
|
||||
"""
|
||||
情绪文本映射表 {(valence, arousal): mood}
|
||||
将量化的情绪状态元组映射到文本描述
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.current_mood = MoodState(
|
||||
valence=0.0,
|
||||
arousal=0.0,
|
||||
text="平静",
|
||||
)
|
||||
"""当前情绪状态"""
|
||||
|
||||
self.mood_change_history: MoodChangeHistory = MoodChangeHistory(
|
||||
valence_direction_factor=0,
|
||||
arousal_direction_factor=0,
|
||||
)
|
||||
"""情绪变化历史"""
|
||||
|
||||
self._lock = asyncio.Lock()
|
||||
"""异步锁,用于保护线程安全"""
|
||||
|
||||
def set_current_mood(self, new_valence: float, new_arousal: float):
|
||||
"""
|
||||
设置当前情绪状态
|
||||
:param new_valence: 新的愉悦度
|
||||
:param new_arousal: 新的唤醒度
|
||||
"""
|
||||
# 限制范围
|
||||
self.current_mood.valence = max(-1.0, min(new_valence, 1.0))
|
||||
self.current_mood.arousal = max(-1.0, min(new_arousal, 1.0))
|
||||
|
||||
closest_mood = None
|
||||
min_distance = float("inf")
|
||||
|
||||
for (v, a), text in self.EMOTION_POINT_MAP.items():
|
||||
# 计算当前情绪状态与每个情绪文本的欧氏距离
|
||||
distance = math.sqrt((self.current_mood.valence - v) ** 2 + (self.current_mood.arousal - a) ** 2)
|
||||
if distance < min_distance:
|
||||
min_distance = distance
|
||||
closest_mood = text
|
||||
|
||||
if closest_mood:
|
||||
self.current_mood.text = closest_mood
|
||||
|
||||
def update_current_mood(self, valence_delta: float, arousal_delta: float):
|
||||
"""
|
||||
根据愉悦度和唤醒度变化量更新当前情绪状态
|
||||
:param valence_delta: 愉悦度变化量
|
||||
:param arousal_delta: 唤醒度变化量
|
||||
"""
|
||||
# 计算连续增益/抑制
|
||||
# 规则:多次相同方向的变化会有更大的影响系数,反方向的变化会清零影响系数(系数的正负号由变化方向决定)
|
||||
if valence_delta * self.mood_change_history.valence_direction_factor > 0:
|
||||
# 如果方向相同,则根据变化方向改变系数
|
||||
if valence_delta > 0:
|
||||
self.mood_change_history.valence_direction_factor += 1 # 若为正向,则增加
|
||||
else:
|
||||
self.mood_change_history.valence_direction_factor -= 1 # 若为负向,则减少
|
||||
else:
|
||||
# 如果方向不同,则重置计数
|
||||
self.mood_change_history.valence_direction_factor = 0
|
||||
|
||||
if arousal_delta * self.mood_change_history.arousal_direction_factor > 0:
|
||||
# 如果方向相同,则根据变化方向改变系数
|
||||
if arousal_delta > 0:
|
||||
self.mood_change_history.arousal_direction_factor += 1 # 若为正向,则增加计数
|
||||
else:
|
||||
self.mood_change_history.arousal_direction_factor -= 1 # 若为负向,则减少计数
|
||||
else:
|
||||
# 如果方向不同,则重置计数
|
||||
self.mood_change_history.arousal_direction_factor = 0
|
||||
|
||||
# 计算增益/抑制的结果
|
||||
# 规则:如果当前情绪状态与变化方向相同,则增益;否则抑制
|
||||
if self.current_mood.valence * self.mood_change_history.valence_direction_factor > 0:
|
||||
valence_delta = valence_delta * (1.01 ** abs(self.mood_change_history.valence_direction_factor))
|
||||
else:
|
||||
valence_delta = valence_delta * (0.99 ** abs(self.mood_change_history.valence_direction_factor))
|
||||
|
||||
if self.current_mood.arousal * self.mood_change_history.arousal_direction_factor > 0:
|
||||
arousal_delta = arousal_delta * (1.01 ** abs(self.mood_change_history.arousal_direction_factor))
|
||||
else:
|
||||
arousal_delta = arousal_delta * (0.99 ** abs(self.mood_change_history.arousal_direction_factor))
|
||||
|
||||
self.set_current_mood(
|
||||
new_valence=self.current_mood.valence + valence_delta,
|
||||
new_arousal=self.current_mood.arousal + arousal_delta,
|
||||
)
|
||||
|
||||
def get_mood_prompt(self) -> str:
|
||||
"""
|
||||
根据当前情绪状态生成提示词
|
||||
"""
|
||||
base_prompt = f"当前心情:{self.current_mood.text}。"
|
||||
|
||||
# 根据情绪状态添加额外的提示信息
|
||||
if self.current_mood.valence > 0.5:
|
||||
base_prompt += "你现在心情很好,"
|
||||
elif self.current_mood.valence < -0.5:
|
||||
base_prompt += "你现在心情不太好,"
|
||||
|
||||
if self.current_mood.arousal > 0.4:
|
||||
base_prompt += "情绪比较激动。"
|
||||
elif self.current_mood.arousal < -0.4:
|
||||
base_prompt += "情绪比较平静。"
|
||||
|
||||
return base_prompt
|
||||
|
||||
def get_arousal_multiplier(self) -> float:
|
||||
"""
|
||||
根据当前情绪状态返回唤醒度乘数
|
||||
"""
|
||||
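# 唤醒度超出 ±0.4 的部分按 1/3 折算为乘数偏移,偏移幅度上限为 ±0.15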
if self.current_mood.arousal > 0.4:
|
||||
multiplier = 1 + min(0.15, (self.current_mood.arousal - 0.4) / 3)
|
||||
return multiplier
|
||||
elif self.current_mood.arousal < -0.4:
|
||||
multiplier = 1 - min(0.15, ((0 - self.current_mood.arousal) - 0.4) / 3)
|
||||
return multiplier
|
||||
return 1.0
|
||||
|
||||
def update_mood_from_emotion(self, emotion: str, intensity: float = 1.0) -> None:
|
||||
"""
|
||||
根据情绪词更新心情状态
|
||||
:param emotion: 情绪词(如'开心', '悲伤'等位于self.EMOTION_FACTOR_MAP中的键)
|
||||
:param intensity: 情绪强度(0.0-1.0)
|
||||
"""
|
||||
if emotion not in self.EMOTION_FACTOR_MAP:
|
||||
logger.error(f"[情绪更新] 未知情绪词: {emotion}")
|
||||
return
|
||||
|
||||
valence_change, arousal_change = self.EMOTION_FACTOR_MAP[emotion]
|
||||
old_valence = self.current_mood.valence
|
||||
old_arousal = self.current_mood.arousal
|
||||
old_mood = self.current_mood.text
|
||||
|
||||
self.update_current_mood(valence_change, arousal_change) # 更新当前情绪状态
|
||||
|
||||
logger.info(
|
||||
f"[情绪变化] {emotion}(强度:{intensity:.2f}) | 愉悦度:{old_valence:.2f}->{self.current_mood.valence:.2f}, 唤醒度:{old_arousal:.2f}->{self.current_mood.arousal:.2f} | 心情:{old_mood}->{self.current_mood.text}"
|
||||
)
|
||||
|
||||
|
||||
mood_manager = MoodManager()
|
||||
"""全局情绪管理器"""
|
||||
src/mood/mood_manager.py (new file, 227 lines)
@@ -0,0 +1,227 @@
|
||||
import math
|
||||
import random
|
||||
import time
|
||||
|
||||
from src.chat.message_receive.message import MessageRecv
|
||||
from src.llm_models.utils_model import LLMRequest
|
||||
from ..common.logger import get_logger
|
||||
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
|
||||
from src.config.config import global_config
|
||||
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
|
||||
from src.manager.async_task_manager import AsyncTask, async_task_manager
|
||||
|
||||
logger = get_logger("mood")
|
||||
|
||||
|
||||
def init_prompt():
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是群里正在进行的聊天记录
|
||||
|
||||
{indentify_block}
|
||||
你刚刚的情绪状态是:{mood_state}
|
||||
|
||||
现在,有人发送了消息,引起了你的注意,你对其进行了阅读和思考,请你输出一句话描述你新的情绪状态
|
||||
请只输出情绪状态,不要输出其他内容:
|
||||
""",
|
||||
"change_mood_prompt",
|
||||
)
|
||||
Prompt(
|
||||
"""
|
||||
{chat_talking_prompt}
|
||||
以上是群里最近的聊天记录
|
||||
|
||||
{indentify_block}
|
||||
你之前的情绪状态是:{mood_state}
|
||||
|
||||
距离你上次关注群里消息已经过去了一段时间,你冷静了下来,请你输出一句话描述你现在的情绪状态
|
||||
请只输出情绪状态,不要输出其他内容:
|
||||
""",
|
||||
"regress_mood_prompt",
|
||||
)
|
||||
|
||||
|
||||
class ChatMood:
|
||||
def __init__(self, chat_id: str):
|
||||
self.chat_id: str = chat_id
|
||||
self.mood_state: str = "感觉很平静"
|
||||
|
||||
self.regression_count: int = 0
|
||||
|
||||
self.mood_model = LLMRequest(
|
||||
model=global_config.model.emotion,
|
||||
temperature=0.7,
|
||||
request_type="mood",
|
||||
)
|
||||
|
||||
self.last_change_time = 0
|
||||
|
||||
async def update_mood_by_message(self, message: MessageRecv, interested_rate: float):
|
||||
self.regression_count = 0
|
||||
|
||||
during_last_time = message.message_info.time - self.last_change_time
|
||||
|
||||
base_probability = 0.05
|
||||
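# 时间乘数随距上次更新的间隔呈指数饱和增长,上限为4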
time_multiplier = 4 * (1 - math.exp(-0.01 * during_last_time))
|
||||
|
||||
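# 兴趣乘数:对兴趣值取四次方根再乘3,放大较小兴趣值的影响;兴趣值非正时乘数为0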
if interested_rate <= 0:
|
||||
interest_multiplier = 0
|
||||
else:
|
||||
interest_multiplier = 3 * math.pow(interested_rate, 0.25)
|
||||
|
||||
logger.info(
|
||||
f"base_probability: {base_probability}, time_multiplier: {time_multiplier}, interest_multiplier: {interest_multiplier}"
|
||||
)
|
||||
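# 三个因子相乘得到本次更新情绪的概率(理论上限约 0.05*4*3 = 0.6),并截断到 1.0 以内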
update_probability = min(1.0, base_probability * time_multiplier * interest_multiplier)
|
||||
|
||||
if random.random() > update_probability:
|
||||
return
|
||||
|
||||
message_time = message.message_info.time
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"change_mood_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
)
|
||||
|
||||
logger.info(f"prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"response: {response}")
|
||||
logger.info(f"reasoning_content: {reasoning_content}")
|
||||
|
||||
self.mood_state = response
|
||||
|
||||
self.last_change_time = message_time
|
||||
|
||||
async def regress_mood(self):
|
||||
message_time = time.time()
|
||||
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
|
||||
chat_id=self.chat_id,
|
||||
timestamp_start=self.last_change_time,
|
||||
timestamp_end=message_time,
|
||||
limit=15,
|
||||
limit_mode="last",
|
||||
)
|
||||
chat_talking_prompt = build_readable_messages(
|
||||
message_list_before_now,
|
||||
replace_bot_name=True,
|
||||
merge_messages=False,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
read_mark=0.0,
|
||||
truncate=True,
|
||||
show_actions=True,
|
||||
)
|
||||
|
||||
bot_name = global_config.bot.nickname
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
else:
|
||||
bot_nickname = ""
|
||||
|
||||
prompt_personality = global_config.personality.personality_core
|
||||
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
|
||||
|
||||
prompt = await global_prompt_manager.format_prompt(
|
||||
"regress_mood_prompt",
|
||||
chat_talking_prompt=chat_talking_prompt,
|
||||
indentify_block=indentify_block,
|
||||
mood_state=self.mood_state,
|
||||
)
|
||||
|
||||
logger.info(f"prompt: {prompt}")
|
||||
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
|
||||
logger.info(f"response: {response}")
|
||||
logger.info(f"reasoning_content: {reasoning_content}")
|
||||
|
||||
self.mood_state = response
|
||||
|
||||
self.regression_count += 1
|
||||
|
||||
|
||||
class MoodRegressionTask(AsyncTask):
|
||||
def __init__(self, mood_manager: "MoodManager"):
|
||||
super().__init__(task_name="MoodRegressionTask", run_interval=30)
|
||||
self.mood_manager = mood_manager
|
||||
|
||||
async def run(self):
|
||||
logger.debug("Running mood regression task...")
|
||||
now = time.time()
|
||||
for mood in self.mood_manager.mood_list:
|
||||
if mood.last_change_time == 0:
|
||||
continue
|
||||
|
||||
if now - mood.last_change_time > 180:
|
||||
if mood.regression_count >= 3:
|
||||
continue
|
||||
|
||||
logger.info(f"chat {mood.chat_id} 开始情绪回归, 这是第 {mood.regression_count + 1} 次")
|
||||
await mood.regress_mood()
|
||||
|
||||
|
||||
class MoodManager:
|
||||
def __init__(self):
|
||||
self.mood_list: list[ChatMood] = []
|
||||
"""当前情绪状态"""
|
||||
self.task_started: bool = False
|
||||
|
||||
async def start(self):
|
||||
"""启动情绪回归后台任务"""
|
||||
if self.task_started:
|
||||
return
|
||||
|
||||
logger.info("启动情绪回归任务...")
|
||||
task = MoodRegressionTask(self)
|
||||
await async_task_manager.add_task(task)
|
||||
self.task_started = True
|
||||
logger.info("情绪回归任务已启动")
|
||||
|
||||
def get_mood_by_chat_id(self, chat_id: str) -> ChatMood:
|
||||
for mood in self.mood_list:
|
||||
if mood.chat_id == chat_id:
|
||||
return mood
|
||||
|
||||
new_mood = ChatMood(chat_id)
|
||||
self.mood_list.append(new_mood)
|
||||
return new_mood
|
||||
|
||||
def reset_mood_by_chat_id(self, chat_id: str):
|
||||
for mood in self.mood_list:
|
||||
if mood.chat_id == chat_id:
|
||||
mood.mood_state = "感觉很平静"
|
||||
mood.regression_count = 0
|
||||
return
|
||||
self.mood_list.append(ChatMood(chat_id))
|
||||
|
||||
|
||||
init_prompt()
|
||||
|
||||
mood_manager = MoodManager()
|
||||
"""全局情绪管理器"""
|
||||
@@ -2,6 +2,7 @@ import time
|
||||
import traceback
|
||||
import os
|
||||
import pickle
|
||||
import random
|
||||
from typing import List, Dict
|
||||
from src.config.config import global_config
|
||||
from src.common.logger import get_logger
|
||||
@@ -20,11 +21,13 @@ logger = get_logger("relationship_builder")
|
||||
# 消息段清理配置
|
||||
SEGMENT_CLEANUP_CONFIG = {
|
||||
"enable_cleanup": True, # 是否启用清理
|
||||
"max_segment_age_days": 7, # 消息段最大保存天数
|
||||
"max_segment_age_days": 3, # 消息段最大保存天数
|
||||
"max_segments_per_user": 10, # 每用户最大消息段数
|
||||
"cleanup_interval_hours": 1, # 清理间隔(小时)
|
||||
"cleanup_interval_hours": 0.5, # 清理间隔(小时)
|
||||
}
|
||||
|
||||
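# 触发关系构建所需的累计消息数阈值:relation_frequency 越大,阈值越低,构建越频繁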
MAX_MESSAGE_COUNT = 80 / global_config.relationship.relation_frequency
|
||||
|
||||
|
||||
class RelationshipBuilder:
|
||||
"""关系构建器
|
||||
@@ -330,7 +333,7 @@ class RelationshipBuilder:
|
||||
for person_id, segments in self.person_engaged_cache.items():
|
||||
total_count = self._get_total_message_count(person_id)
|
||||
status_lines.append(f"用户 {person_id}:")
|
||||
status_lines.append(f" 总消息数:{total_count} ({total_count}/45)")
|
||||
status_lines.append(f" 总消息数:{total_count} ({total_count}/60)")
|
||||
status_lines.append(f" 消息段数:{len(segments)}")
|
||||
|
||||
for i, segment in enumerate(segments):
|
||||
@@ -384,7 +387,7 @@ class RelationshipBuilder:
|
||||
users_to_build_relationship = []
|
||||
for person_id, segments in self.person_engaged_cache.items():
|
||||
total_message_count = self._get_total_message_count(person_id)
|
||||
if total_message_count >= 45:
|
||||
if total_message_count >= MAX_MESSAGE_COUNT:
|
||||
users_to_build_relationship.append(person_id)
|
||||
logger.debug(
|
||||
f"{self.log_prefix} 用户 {person_id} 满足关系构建条件,总消息数:{total_message_count},消息段数:{len(segments)}"
|
||||
@@ -392,7 +395,7 @@ class RelationshipBuilder:
|
||||
elif total_message_count > 0:
|
||||
# 记录进度信息
|
||||
logger.debug(
|
||||
f"{self.log_prefix} 用户 {person_id} 进度:{total_message_count}/45 条消息,{len(segments)} 个消息段"
|
||||
f"{self.log_prefix} 用户 {person_id} 进度:{total_message_count}60 条消息,{len(segments)} 个消息段"
|
||||
)
|
||||
|
||||
# 2. 为满足条件的用户构建关系
|
||||
@@ -413,11 +416,28 @@ class RelationshipBuilder:
|
||||
|
||||
async def update_impression_on_segments(self, person_id: str, chat_id: str, segments: List[Dict[str, any]]):
|
||||
"""基于消息段更新用户印象"""
|
||||
logger.debug(f"开始为 {person_id} 基于 {len(segments)} 个消息段更新印象")
|
||||
original_segment_count = len(segments)
|
||||
logger.debug(f"开始为 {person_id} 基于 {original_segment_count} 个消息段更新印象")
|
||||
try:
|
||||
# 筛选要处理的消息段,每个消息段有10%的概率被丢弃
|
||||
segments_to_process = [s for s in segments if random.random() >= 0.1]
|
||||
|
||||
# 如果所有消息段都被丢弃,但原来有消息段,则至少保留一个(最新的)
|
||||
if not segments_to_process and segments:
|
||||
segments.sort(key=lambda x: x["end_time"], reverse=True)
|
||||
segments_to_process.append(segments[0])
|
||||
logger.debug("随机丢弃了所有消息段,强制保留最新的一个以进行处理。")
|
||||
|
||||
dropped_count = original_segment_count - len(segments_to_process)
|
||||
if dropped_count > 0:
|
||||
logger.info(f"为 {person_id} 随机丢弃了 {dropped_count} / {original_segment_count} 个消息段")
|
||||
|
||||
processed_messages = []
|
||||
|
||||
for i, segment in enumerate(segments):
|
||||
# 对筛选后的消息段进行排序,确保时间顺序
|
||||
segments_to_process.sort(key=lambda x: x["start_time"])
|
||||
|
||||
for segment in segments_to_process:
|
||||
start_time = segment["start_time"]
|
||||
end_time = segment["end_time"]
|
||||
start_date = time.strftime("%Y-%m-%d %H:%M", time.localtime(start_time))
|
||||
@@ -425,12 +445,12 @@ class RelationshipBuilder:
|
||||
# 获取该段的消息(包含边界)
|
||||
segment_messages = get_raw_msg_by_timestamp_with_chat_inclusive(self.chat_id, start_time, end_time)
|
||||
logger.debug(
|
||||
f"消息段 {i + 1}: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}"
|
||||
f"消息段: {start_date} - {time.strftime('%Y-%m-%d %H:%M', time.localtime(end_time))}, 消息数: {len(segment_messages)}"
|
||||
)
|
||||
|
||||
if segment_messages:
|
||||
# 如果不是第一个消息段,在消息列表前添加间隔标识
|
||||
if i > 0:
|
||||
# 如果 processed_messages 不为空,说明这不是第一个被处理的消息段,在消息列表前添加间隔标识
|
||||
if processed_messages:
|
||||
# 创建一个特殊的间隔消息
|
||||
gap_message = {
|
||||
"time": start_time - 0.1, # 稍微早于段开始时间
|
||||
|
||||
@@ -120,27 +120,38 @@ class RelationshipFetcher:
|
||||
|
||||
# 按时间排序forgotten_points
|
||||
current_points.sort(key=lambda x: x[2])
|
||||
# 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大
|
||||
# 按权重加权随机抽取最多3个不重复的points,point[1]的值在1-10之间,权重越高被抽到概率越大
|
||||
if len(current_points) > 3:
|
||||
# point[1] 取值范围1-10,直接作为权重
|
||||
weights = [max(1, min(10, int(point[1]))) for point in current_points]
|
||||
points = random.choices(current_points, weights=weights, k=3)
|
||||
# 使用加权采样不放回,保证不重复
|
||||
indices = list(range(len(current_points)))
|
||||
points = []
|
||||
for _ in range(3):
|
||||
if not indices:
|
||||
break
|
||||
sub_weights = [weights[i] for i in indices]
|
||||
chosen_idx = random.choices(indices, weights=sub_weights, k=1)[0]
|
||||
points.append(current_points[chosen_idx])
|
||||
indices.remove(chosen_idx)
|
||||
else:
|
||||
points = current_points
|
||||
|
||||
# 构建points文本
|
||||
points_text = "\n".join([f"{point[2]}:{point[0]}" for point in points])
|
||||
|
||||
info_type = await self._build_fetch_query(person_id, target_message, chat_history)
|
||||
if info_type:
|
||||
await self._extract_single_info(person_id, info_type, person_name)
|
||||
# info_type = await self._build_fetch_query(person_id, target_message, chat_history)
|
||||
# if info_type:
|
||||
# await self._extract_single_info(person_id, info_type, person_name)
|
||||
|
||||
relation_info = self._organize_known_info()
|
||||
# relation_info = self._organize_known_info()
|
||||
|
||||
nickname_str = ""
|
||||
if person_name != nickname_str:
|
||||
nickname_str = f"(ta在{platform}上的昵称是{nickname_str})"
|
||||
|
||||
relation_info = ""
|
||||
|
||||
if short_impression and relation_info:
|
||||
if points_text:
|
||||
relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事:{points_text}"
|
||||
|
||||
@@ -20,7 +20,7 @@ logger = get_logger("relation")
|
||||
class RelationshipManager:
|
||||
def __init__(self):
|
||||
self.relationship_llm = LLMRequest(
|
||||
model=global_config.model.relation,
|
||||
model=global_config.model.utils,
|
||||
request_type="relationship", # 用于动作规划
|
||||
)
|
||||
|
||||
@@ -68,7 +68,7 @@ class RelationshipManager:
|
||||
short_impression = await person_info_manager.get_value(person_id, "short_impression")
|
||||
|
||||
current_points = await person_info_manager.get_value(person_id, "points") or []
|
||||
print(f"current_points: {current_points}")
|
||||
# print(f"current_points: {current_points}")
|
||||
if isinstance(current_points, str):
|
||||
try:
|
||||
current_points = json.loads(current_points)
|
||||
@@ -89,7 +89,7 @@ class RelationshipManager:
|
||||
points = current_points
|
||||
|
||||
# 构建points文本
|
||||
points_text = "\n".join([f"{point[2]}:{point[0]}\n" for point in points])
|
||||
points_text = "\n".join([f"{point[2]}:{point[0]}" for point in points])
|
||||
|
||||
nickname_str = await person_info_manager.get_value(person_id, "nickname")
|
||||
platform = await person_info_manager.get_value(person_id, "platform")
|
||||
@@ -250,10 +250,26 @@ class RelationshipManager:
|
||||
# 添加可读时间到每个point
|
||||
points_list = [(item["point"], float(item["weight"]), current_time) for item in points_data]
|
||||
|
||||
logger_str = f"了解了有关{person_name}的新印象:\n"
|
||||
for point in points_list:
|
||||
logger_str += f"{point[0]},重要性:{point[1]}\n"
|
||||
logger.info(logger_str)
|
||||
original_points_list = list(points_list)
|
||||
points_list.clear()
|
||||
discarded_count = 0
|
||||
|
||||
for point in original_points_list:
|
||||
weight = point[1]
|
||||
if weight < 3 and random.random() < 0.8: # 80% 概率丢弃
|
||||
discarded_count += 1
|
||||
elif weight < 5 and random.random() < 0.5: # 50% 概率丢弃
|
||||
discarded_count += 1
|
||||
else:
|
||||
points_list.append(point)
|
||||
|
||||
if points_list or discarded_count > 0:
|
||||
logger_str = f"了解了有关{person_name}的新印象:\n"
|
||||
for point in points_list:
|
||||
logger_str += f"{point[0]},重要性:{point[1]}\n"
|
||||
if discarded_count > 0:
|
||||
logger_str += f"({discarded_count} 条因重要性低被丢弃)\n"
|
||||
logger.info(logger_str)
|
||||
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"解析points JSON失败: {points}")
|
||||
@@ -344,19 +360,19 @@ class RelationshipManager:
|
||||
# 根据熟悉度,调整印象和简短印象的最大长度
|
||||
if know_times > 300:
|
||||
max_impression_length = 2000
|
||||
max_short_impression_length = 800
|
||||
max_short_impression_length = 400
|
||||
elif know_times > 100:
|
||||
max_impression_length = 1000
|
||||
max_short_impression_length = 500
|
||||
max_short_impression_length = 250
|
||||
elif know_times > 50:
|
||||
max_impression_length = 500
|
||||
max_short_impression_length = 300
|
||||
max_short_impression_length = 150
|
||||
elif know_times > 10:
|
||||
max_impression_length = 200
|
||||
max_short_impression_length = 100
|
||||
max_short_impression_length = 60
|
||||
else:
|
||||
max_impression_length = 100
|
||||
max_short_impression_length = 50
|
||||
max_short_impression_length = 30
|
||||
|
||||
# 根据好感度,调整印象和简短印象的最大长度
|
||||
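# 态度与中性值100的偏差越大,乘数越大,印象长度上限相应放宽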
attitude_multiplier = (abs(100 - attitude) / 100) + 1
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
count = emoji_api.get_count()
|
||||
"""
|
||||
|
||||
from typing import Optional, Tuple
|
||||
from typing import Optional, Tuple, List
|
||||
from src.common.logger import get_logger
|
||||
from src.chat.emoji_system.emoji_manager import get_emoji_manager
|
||||
from src.chat.utils.utils_image import image_path_to_base64
|
||||
@@ -55,14 +55,20 @@ async def get_by_description(description: str) -> Optional[Tuple[str, str, str]]
|
||||
return None
|
||||
|
||||
|
||||
async def get_random() -> Optional[Tuple[str, str, str]]:
|
||||
"""随机获取表情包
|
||||
async def get_random(count: int = 1) -> Optional[List[Tuple[str, str, str]]]:
|
||||
"""随机获取指定数量的表情包
|
||||
|
||||
Args:
|
||||
count: 要获取的表情包数量,默认为1
|
||||
|
||||
Returns:
|
||||
Optional[Tuple[str, str, str]]: (base64编码, 表情包描述, 随机情感标签) 或 None
|
||||
Optional[List[Tuple[str, str, str]]]: 包含(base64编码, 表情包描述, 随机情感标签)的元组列表,如果失败则为None
|
||||
"""
|
||||
if count <= 0:
|
||||
return []
|
||||
|
||||
try:
|
||||
logger.info("[EmojiAPI] 随机获取表情包")
|
||||
logger.info(f"[EmojiAPI] 随机获取 {count} 个表情包")
|
||||
|
||||
emoji_manager = get_emoji_manager()
|
||||
all_emojis = emoji_manager.emoji_objects
|
||||
@@ -77,23 +83,37 @@ async def get_random() -> Optional[Tuple[str, str, str]]:
|
||||
logger.warning("[EmojiAPI] 没有有效的表情包")
|
||||
return None
|
||||
|
||||
if len(valid_emojis) < count:
|
||||
logger.warning(
|
||||
f"[EmojiAPI] 有效表情包数量 ({len(valid_emojis)}) 少于请求的数量 ({count}),将返回所有有效表情包"
|
||||
)
|
||||
count = len(valid_emojis)
|
||||
|
||||
# 随机选择
|
||||
import random
|
||||
|
||||
selected_emoji = random.choice(valid_emojis)
|
||||
emoji_base64 = image_path_to_base64(selected_emoji.full_path)
|
||||
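# random.sample 为不放回抽样,保证选出的表情包互不重复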
selected_emojis = random.sample(valid_emojis, count)
|
||||
|
||||
if not emoji_base64:
|
||||
logger.error(f"[EmojiAPI] 无法转换表情包为base64: {selected_emoji.full_path}")
|
||||
results = []
|
||||
for selected_emoji in selected_emojis:
|
||||
emoji_base64 = image_path_to_base64(selected_emoji.full_path)
|
||||
|
||||
if not emoji_base64:
|
||||
logger.error(f"[EmojiAPI] 无法转换表情包为base64: {selected_emoji.full_path}")
|
||||
continue
|
||||
|
||||
matched_emotion = random.choice(selected_emoji.emotion) if selected_emoji.emotion else "随机表情"
|
||||
|
||||
# 记录使用次数
|
||||
emoji_manager.record_usage(selected_emoji.hash)
|
||||
results.append((emoji_base64, selected_emoji.description, matched_emotion))
|
||||
|
||||
if not results and count > 0:
|
||||
logger.warning("[EmojiAPI] 随机获取表情包失败,没有一个可以成功处理")
|
||||
return None
|
||||
|
||||
matched_emotion = random.choice(selected_emoji.emotion) if selected_emoji.emotion else "随机表情"
|
||||
|
||||
# 记录使用次数
|
||||
emoji_manager.record_usage(selected_emoji.hash)
|
||||
|
||||
logger.info(f"[EmojiAPI] 成功获取随机表情包: {selected_emoji.description}")
|
||||
return emoji_base64, selected_emoji.description, matched_emotion
|
||||
logger.info(f"[EmojiAPI] 成功获取 {len(results)} 个随机表情包")
|
||||
return results
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[EmojiAPI] 获取随机表情包失败: {e}")
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any, Tuple, Optional
|
||||
from src.config.config import global_config
|
||||
import time
|
||||
from src.chat.utils.chat_message_builder import (
|
||||
get_raw_msg_by_timestamp,
|
||||
@@ -34,7 +35,7 @@ from src.chat.utils.chat_message_builder import (
|
||||
|
||||
|
||||
def get_messages_by_time(
|
||||
start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest"
|
||||
start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest", filter_mai: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定时间范围内的消息
|
||||
@@ -44,15 +45,18 @@ def get_messages_by_time(
|
||||
end_time: 结束时间戳
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
limit_mode: 当limit>0时生效,'earliest'表示获取最早的记录,'latest'表示获取最新的记录
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_by_timestamp(start_time, end_time, limit, limit_mode))
|
||||
return get_raw_msg_by_timestamp(start_time, end_time, limit, limit_mode)
|
||||
|
||||
|
||||
def get_messages_by_time_in_chat(
|
||||
chat_id: str, start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest"
|
||||
chat_id: str, start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest", filter_mai: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定聊天中指定时间范围内的消息
|
||||
@@ -63,15 +67,18 @@ def get_messages_by_time_in_chat(
|
||||
end_time: 结束时间戳
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
limit_mode: 当limit>0时生效,'earliest'表示获取最早的记录,'latest'表示获取最新的记录
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_by_timestamp_with_chat(chat_id, start_time, end_time, limit, limit_mode))
|
||||
return get_raw_msg_by_timestamp_with_chat(chat_id, start_time, end_time, limit, limit_mode)
|
||||
|
||||
|
||||
def get_messages_by_time_in_chat_inclusive(
|
||||
chat_id: str, start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest"
|
||||
chat_id: str, start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest", filter_mai: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定聊天中指定时间范围内的消息(包含边界)
|
||||
@@ -82,10 +89,13 @@ def get_messages_by_time_in_chat_inclusive(
|
||||
end_time: 结束时间戳(包含)
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
limit_mode: 当limit>0时生效,'earliest'表示获取最早的记录,'latest'表示获取最新的记录
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_by_timestamp_with_chat_inclusive(chat_id, start_time, end_time, limit, limit_mode))
|
||||
return get_raw_msg_by_timestamp_with_chat_inclusive(chat_id, start_time, end_time, limit, limit_mode)
|
||||
|
||||
|
||||
@@ -115,7 +125,7 @@ def get_messages_by_time_in_chat_for_users(
|
||||
|
||||
|
||||
def get_random_chat_messages(
|
||||
start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest"
|
||||
start_time: float, end_time: float, limit: int = 0, limit_mode: str = "latest", filter_mai: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
随机选择一个聊天,返回该聊天在指定时间范围内的消息
|
||||
@@ -125,10 +135,13 @@ def get_random_chat_messages(
|
||||
end_time: 结束时间戳
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
limit_mode: 当limit>0时生效,'earliest'表示获取最早的记录,'latest'表示获取最新的记录
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_by_timestamp_random(start_time, end_time, limit, limit_mode))
|
||||
return get_raw_msg_by_timestamp_random(start_time, end_time, limit, limit_mode)
|
||||
|
||||
|
||||
@@ -151,21 +164,24 @@ def get_messages_by_time_for_users(
|
||||
return get_raw_msg_by_timestamp_with_users(start_time, end_time, person_ids, limit, limit_mode)
|
||||
|
||||
|
||||
def get_messages_before_time(timestamp: float, limit: int = 0) -> List[Dict[str, Any]]:
|
||||
def get_messages_before_time(timestamp: float, limit: int = 0, filter_mai: bool = False) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定时间戳之前的消息
|
||||
|
||||
Args:
|
||||
timestamp: 时间戳
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_before_timestamp(timestamp, limit))
|
||||
return get_raw_msg_before_timestamp(timestamp, limit)
|
||||
|
||||
|
||||
def get_messages_before_time_in_chat(chat_id: str, timestamp: float, limit: int = 0) -> List[Dict[str, Any]]:
|
||||
def get_messages_before_time_in_chat(chat_id: str, timestamp: float, limit: int = 0, filter_mai: bool = False) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定聊天中指定时间戳之前的消息
|
||||
|
||||
@@ -173,10 +189,13 @@ def get_messages_before_time_in_chat(chat_id: str, timestamp: float, limit: int
|
||||
chat_id: 聊天ID
|
||||
timestamp: 时间戳
|
||||
limit: 限制返回的消息数量,0为不限制
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_before_timestamp_with_chat(chat_id, timestamp, limit))
|
||||
return get_raw_msg_before_timestamp_with_chat(chat_id, timestamp, limit)
|
||||
|
||||
|
||||
@@ -196,7 +215,7 @@ def get_messages_before_time_for_users(timestamp: float, person_ids: list, limit
|
||||
|
||||
|
||||
def get_recent_messages(
|
||||
chat_id: str, hours: float = 24.0, limit: int = 100, limit_mode: str = "latest"
|
||||
chat_id: str, hours: float = 24.0, limit: int = 100, limit_mode: str = "latest", filter_mai: bool = False
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
获取指定聊天中最近一段时间的消息
|
||||
@@ -206,12 +225,15 @@ def get_recent_messages(
|
||||
hours: 最近多少小时,默认24小时
|
||||
limit: 限制返回的消息数量,默认100条
|
||||
limit_mode: 当limit>0时生效,'earliest'表示获取最早的记录,'latest'表示获取最新的记录
|
||||
filter_mai: 是否过滤麦麦自身的消息,默认为False
|
||||
|
||||
Returns:
|
||||
消息列表
|
||||
"""
|
||||
now = time.time()
|
||||
start_time = now - hours * 3600
|
||||
if filter_mai:
|
||||
return filter_mai_messages(get_raw_msg_by_timestamp_with_chat(chat_id, start_time, now, limit, limit_mode))
|
||||
return get_raw_msg_by_timestamp_with_chat(chat_id, start_time, now, limit, limit_mode)
|
||||
|
||||
|
||||
@@ -319,3 +341,17 @@ async def get_person_ids_from_messages(messages: List[Dict[str, Any]]) -> List[s
|
||||
用户ID列表
|
||||
"""
|
||||
return await get_person_id_list(messages)
|
||||
|
||||
# =============================================================================
|
||||
# 消息过滤函数
|
||||
# =============================================================================
|
||||
|
||||
def filter_mai_messages(messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
从消息列表中移除麦麦的消息
|
||||
Args:
|
||||
messages: 消息列表,每个元素是消息字典
|
||||
Returns:
|
||||
过滤后的消息列表
|
||||
"""
|
||||
return [msg for msg in messages if msg.get("user_id") != str(global_config.bot.qq_account)]
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import random
|
||||
from typing import Tuple
|
||||
|
||||
# 导入新插件系统
|
||||
@@ -7,7 +8,7 @@ from src.plugin_system import BaseAction, ActionActivationType, ChatMode
|
||||
from src.common.logger import get_logger
|
||||
|
||||
# 导入API模块 - 标准Python包方式
|
||||
from src.plugin_system.apis import emoji_api
|
||||
from src.plugin_system.apis import emoji_api, llm_api, message_api
|
||||
from src.plugins.built_in.core_actions.no_reply import NoReplyAction
|
||||
|
||||
|
||||
@@ -39,7 +40,7 @@ class EmojiAction(BaseAction):
|
||||
"""
|
||||
|
||||
# 动作参数定义
|
||||
action_parameters = {"description": "文字描述你想要发送的表情包内容"}
|
||||
action_parameters = {"reason": "文字描述你想要发送的表情包原因"}
|
||||
|
||||
# 动作使用场景
|
||||
action_require = [
|
||||
@@ -56,18 +57,82 @@ class EmojiAction(BaseAction):
|
||||
logger.info(f"{self.log_prefix} 决定发送表情")
|
||||
|
||||
try:
|
||||
# 1. 根据描述选择表情包
|
||||
description = self.action_data.get("description", "")
|
||||
emoji_result = await emoji_api.get_by_description(description)
|
||||
# 1. 获取发送表情的原因
|
||||
reason = self.action_data.get("reason", "表达当前情绪")
|
||||
logger.info(f"{self.log_prefix} 发送表情原因: {reason}")
|
||||
|
||||
if not emoji_result:
|
||||
logger.warning(f"{self.log_prefix} 未找到匹配描述 '{description}' 的表情包")
|
||||
return False, f"未找到匹配 '{description}' 的表情包"
|
||||
# 2. 随机获取30个表情包
|
||||
sampled_emojis = await emoji_api.get_random(30)
|
||||
if not sampled_emojis:
|
||||
logger.warning(f"{self.log_prefix} 无法获取随机表情包")
|
||||
return False, "无法获取随机表情包"
|
||||
|
||||
emoji_base64, emoji_description, matched_emotion = emoji_result
|
||||
logger.info(f"{self.log_prefix} 找到表达{matched_emotion}的表情包")
|
||||
# 3. 准备情感数据
|
||||
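# 按情感标签聚合表情包,便于后续按LLM选定的情感随机取用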
emotion_map = {}
|
||||
for b64, desc, emo in sampled_emojis:
|
||||
if emo not in emotion_map:
|
||||
emotion_map[emo] = []
|
||||
emotion_map[emo].append((b64, desc))
|
||||
|
||||
# 使用BaseAction的便捷方法发送表情包
|
||||
available_emotions = list(emotion_map.keys())
|
||||
|
||||
if not available_emotions:
|
||||
logger.warning(f"{self.log_prefix} 获取到的表情包均无情感标签, 将随机发送")
|
||||
emoji_base64, emoji_description, _ = random.choice(sampled_emojis)
|
||||
else:
|
||||
# 获取最近的5条消息内容用于判断
|
||||
recent_messages = message_api.get_recent_messages(chat_id=self.chat_id, limit=5)
|
||||
messages_text = ""
|
||||
if recent_messages:
|
||||
# 使用message_api构建可读的消息字符串
|
||||
messages_text = message_api.build_readable_messages(
|
||||
messages=recent_messages,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
truncate=False,
|
||||
show_actions=False,
|
||||
)
|
||||
|
||||
# 4. 构建prompt让LLM选择情感
|
||||
prompt = f"""
|
||||
你是一个正在进行聊天的网友,你需要根据一个理由和最近的聊天记录,从一个情感标签列表中选择最匹配的一个。
|
||||
这是最近的聊天记录:
|
||||
{messages_text}
|
||||
|
||||
这是理由:“{reason}”
|
||||
这里是可用的情感标签:{available_emotions}
|
||||
请直接返回最匹配的那个情感标签,不要进行任何解释或添加其他多余的文字。
|
||||
"""
|
||||
logger.info(f"{self.log_prefix} 生成的LLM Prompt: {prompt}")
|
||||
|
||||
# 5. 调用LLM
|
||||
models = llm_api.get_available_models()
|
||||
chat_model_config = getattr(models, "utils_small", None)  # 使用 utils_small 模型进行判断
|
||||
if not chat_model_config:
|
||||
logger.error(f"{self.log_prefix} 未找到'chat'模型配置,无法调用LLM")
|
||||
return False, "未找到'chat'模型配置"
|
||||
|
||||
success, chosen_emotion, _, _ = await llm_api.generate_with_model(
|
||||
prompt, model_config=chat_model_config, request_type="emoji"
|
||||
)
|
||||
|
||||
if not success:
|
||||
logger.error(f"{self.log_prefix} LLM调用失败: {chosen_emotion}")
|
||||
return False, f"LLM调用失败: {chosen_emotion}"
|
||||
|
||||
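# 去除LLM输出首尾空白及可能包裹的引号,便于与情感标签精确匹配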
chosen_emotion = chosen_emotion.strip().replace('"', "").replace("'", "")
|
||||
logger.info(f"{self.log_prefix} LLM选择的情感: {chosen_emotion}")
|
||||
|
||||
# 6. 根据选择的情感匹配表情包
|
||||
if chosen_emotion in emotion_map:
|
||||
emoji_base64, emoji_description = random.choice(emotion_map[chosen_emotion])
|
||||
logger.info(f"{self.log_prefix} 找到匹配情感 '{chosen_emotion}' 的表情包: {emoji_description}")
|
||||
else:
|
||||
logger.warning(
|
||||
f"{self.log_prefix} LLM选择的情感 '{chosen_emotion}' 不在可用列表中, 将随机选择一个表情包"
|
||||
)
|
||||
emoji_base64, emoji_description, _ = random.choice(sampled_emojis)
|
||||
|
||||
# 7. 发送表情包
|
||||
success = await self.send_emoji(emoji_base64)
|
||||
|
||||
if not success:
|
||||
@@ -80,5 +145,5 @@ class EmojiAction(BaseAction):
|
||||
return True, f"发送表情包: {emoji_description}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 表情动作执行失败: {e}")
|
||||
logger.error(f"{self.log_prefix} 表情动作执行失败: {e}", exc_info=True)
|
||||
return False, f"表情发送失败: {str(e)}"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import random
|
||||
import time
|
||||
import json
|
||||
import asyncio
|
||||
from typing import Tuple
|
||||
|
||||
# 导入新插件系统
|
||||
@@ -10,26 +10,24 @@ from src.plugin_system import BaseAction, ActionActivationType, ChatMode
|
||||
from src.common.logger import get_logger
|
||||
|
||||
# 导入API模块 - 标准Python包方式
|
||||
from src.plugin_system.apis import message_api, llm_api
|
||||
from src.plugin_system.apis import message_api
|
||||
from src.config.config import global_config
|
||||
from json_repair import repair_json
|
||||
|
||||
|
||||
logger = get_logger("core_actions")
|
||||
|
||||
# 设置一个全局字典,确保同一个消息流的下一个NoReplyAction实例能够获取到上一次消息的时间戳
|
||||
_CHAT_START_TIMES = {}
|
||||
|
||||
class NoReplyAction(BaseAction):
|
||||
"""不回复动作,使用智能判断机制决定何时结束等待
|
||||
"""不回复动作,根据新消息的兴趣值或数量决定何时结束等待.
|
||||
|
||||
新的等待逻辑:
|
||||
- 每0.2秒检查是否有新消息(提高响应性)
|
||||
- 如果累计消息数量达到阈值(默认20条),直接结束等待
|
||||
- 有新消息时进行LLM判断,但最快1秒一次(防止过于频繁)
|
||||
- 如果判断需要回复,则结束等待;否则继续等待
|
||||
- 达到最大超时时间后强制结束
|
||||
新的等待逻辑:
|
||||
1. 新消息累计兴趣值超过阈值 (默认10) 则结束等待
|
||||
2. 累计新消息数量达到随机阈值 (默认5-10条) 则结束等待
|
||||
"""
|
||||
|
||||
focus_activation_type = ActionActivationType.ALWAYS
|
||||
# focus_activation_type = ActionActivationType.RANDOM
|
||||
normal_activation_type = ActionActivationType.NEVER
|
||||
mode_enable = ChatMode.FOCUS
|
||||
parallel_action = False
|
||||
@@ -41,65 +39,60 @@ class NoReplyAction(BaseAction):
|
||||
# 连续no_reply计数器
|
||||
_consecutive_count = 0
|
||||
|
||||
# LLM判断的最小间隔时间
|
||||
_min_judge_interval = 1.0 # 最快1秒一次LLM判断
|
||||
|
||||
# 自动结束的消息数量阈值
|
||||
_auto_exit_message_count = 20 # 累计20条消息自动结束
|
||||
|
||||
# 最大等待超时时间
|
||||
_max_timeout = 600 # 1200秒
|
||||
|
||||
# 跳过LLM判断的配置
|
||||
_skip_judge_when_tired = True
|
||||
_skip_probability = 0.5
|
||||
|
||||
# 新增:回复频率退出专注模式的配置
|
||||
_frequency_check_window = 600 # 频率检查窗口时间(秒)
|
||||
# 新增:兴趣值退出阈值
|
||||
_interest_exit_threshold = 3.0
|
||||
# 新增:消息数量退出阈值
|
||||
_min_exit_message_count = 4
|
||||
_max_exit_message_count = 8
|
||||
|
||||
# 动作参数定义
|
||||
action_parameters = {"reason": "不回复的原因"}
|
||||
|
||||
# 动作使用场景
|
||||
action_require = ["你发送了消息,目前无人回复"]
|
||||
action_require = [
|
||||
"你发送了消息,目前无人回复",
|
||||
"你觉得对方还没把话说完",
|
||||
"你觉得当前话题不适合插嘴",
|
||||
"你觉得自己说话太多了"
|
||||
]
|
||||
|
||||
# 关联类型
|
||||
associated_types = []
|
||||
|
||||
async def execute(self) -> Tuple[bool, str]:
|
||||
"""执行不回复动作,有新消息时进行判断,但最快1秒一次"""
|
||||
import asyncio
|
||||
|
||||
"""执行不回复动作"""
|
||||
try:
|
||||
|
||||
# 获取或初始化当前消息的起始时间,因为用户消息是可能在刚决定好可用动作,但还没选择动作的时候发送的。原先的start_time设计会导致这种消息被漏掉,现在采用全局字典存储
|
||||
if self.chat_id not in _CHAT_START_TIMES:
|
||||
# 如果对应消息流没有存储时间,就设置为当前时间
|
||||
_CHAT_START_TIMES[self.chat_id] = time.time()
|
||||
start_time = _CHAT_START_TIMES[self.chat_id]
|
||||
else:
|
||||
message_current_time = time.time()
|
||||
if message_current_time - _CHAT_START_TIMES[self.chat_id] > 600:
|
||||
# 如果上一次NoReplyAction实例记录的最后消息时间戳距离现在时间戳超过了十分钟,将会把start_time设置为当前时间戳,避免在数据库内过度搜索
|
||||
start_time = message_current_time
|
||||
logger.debug("距离上一次消息时间过长,已重置等待开始时间为当前时间")
|
||||
else:
|
||||
# 如果距离上一次noreply没有十分钟,就沿用上一次noreply退出时记录的最新消息时间戳
|
||||
start_time = _CHAT_START_TIMES[self.chat_id]
|
||||
|
||||
# 增加连续计数
|
||||
NoReplyAction._consecutive_count += 1
|
||||
count = NoReplyAction._consecutive_count
|
||||
|
||||
reason = self.action_data.get("reason", "")
|
||||
start_time = time.time()
|
||||
last_judge_time = start_time # 上次进行LLM判断的时间
|
||||
min_judge_interval = self._min_judge_interval # 最小判断间隔,从配置获取
|
||||
check_interval = 0.2 # 检查新消息的间隔,设为0.2秒提高响应性
|
||||
check_interval = 1.0 # 每秒检查一次
|
||||
|
||||
# 累积判断历史
|
||||
judge_history = [] # 存储每次判断的结果和理由
|
||||
|
||||
# 获取no_reply开始时的上下文消息(10条),用于后续记录
|
||||
context_messages = message_api.get_messages_by_time_in_chat(
|
||||
chat_id=self.chat_id,
|
||||
start_time=start_time - 600, # 获取开始前10分钟内的消息
|
||||
end_time=start_time,
|
||||
limit=10,
|
||||
limit_mode="latest",
|
||||
# 随机生成本次等待需要的新消息数量阈值
|
||||
exit_message_count_threshold = random.randint(self._min_exit_message_count, self._max_exit_message_count)
|
||||
logger.info(
|
||||
f"{self.log_prefix} 本次no_reply需要 {exit_message_count_threshold} 条新消息或累计兴趣值超过 {self._interest_exit_threshold} 才能打断"
|
||||
)
|
||||
|
||||
# 构建上下文字符串
|
||||
context_str = ""
|
||||
if context_messages:
|
||||
context_str = message_api.build_readable_messages(
|
||||
messages=context_messages, timestamp_mode="normal_no_YMD", truncate=False, show_actions=True
|
||||
)
|
||||
context_str = f"当时选择no_reply前的聊天上下文:\n{context_str}\n"
|
||||
if not self.is_group:
|
||||
exit_message_count_threshold = 1
|
||||
logger.info(f"检测到当前环境为私聊,本次no_reply已更正为需要{exit_message_count_threshold}条新消息就能打断")
|
||||
|
||||
logger.info(f"{self.log_prefix} 选择不回复(第{count}次),开始摸鱼,原因: {reason}")
|
||||
|
||||
@@ -108,196 +101,79 @@ class NoReplyAction(BaseAction):
|
||||
current_time = time.time()
|
||||
elapsed_time = current_time - start_time
|
||||
|
||||
if global_config.chat.chat_mode == "auto" and self.is_group:
|
||||
# 检查是否超时
|
||||
if elapsed_time >= self._max_timeout or self._check_no_activity_and_exit_focus(current_time):
|
||||
logger.info(
|
||||
f"{self.log_prefix} 等待时间过久({self._max_timeout}秒)或过去10分钟完全没有发言,退出专注模式"
|
||||
)
|
||||
# 标记退出专注模式
|
||||
self.action_data["_system_command"] = "stop_focus_chat"
|
||||
exit_reason = f"{global_config.bot.nickname}(你)等待了{self._max_timeout}秒,或完全没有说话,感觉群里没有新内容,决定退出专注模式,稍作休息"
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_prompt_display=exit_reason,
|
||||
action_done=True,
|
||||
)
|
||||
return True, exit_reason
|
||||
|
||||
# 检查是否有新消息
|
||||
new_message_count = message_api.count_new_messages(
|
||||
chat_id=self.chat_id, start_time=start_time, end_time=current_time
|
||||
# 1. 检查新消息,默认过滤麦麦自己的消息
|
||||
recent_messages_dict = message_api.get_messages_by_time_in_chat(
|
||||
chat_id=self.chat_id, start_time=start_time, end_time=current_time, filter_mai=True
|
||||
)
|
||||
new_message_count = len(recent_messages_dict)
|
||||
|
||||
# 如果累计消息数量达到阈值,直接结束等待
|
||||
if new_message_count >= self._auto_exit_message_count:
|
||||
logger.info(f"{self.log_prefix} 累计消息数量达到{new_message_count}条,直接结束等待")
|
||||
# 2. 检查消息数量是否达到阈值
|
||||
if new_message_count >= exit_message_count_threshold:
|
||||
logger.info(
|
||||
f"{self.log_prefix} 累计消息数量达到{new_message_count}条(>{exit_message_count_threshold}),结束等待"
|
||||
)
|
||||
exit_reason = f"{global_config.bot.nickname}(你)看到了{new_message_count}条新消息,可以考虑一下是否要进行回复"
|
||||
# 如果是私聊,就稍微改一下退出理由
|
||||
if not self.is_group:
|
||||
exit_reason = f"{global_config.bot.nickname}(你)看到了私聊的{new_message_count}条新消息,可以考虑一下是否要进行回复"
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_build_into_prompt=False,
|
||||
action_prompt_display=exit_reason,
|
||||
action_done=True,
|
||||
)
|
||||
return True, f"累计消息数量达到{new_message_count}条,直接结束等待 (等待时间: {elapsed_time:.1f}秒)"
|
||||
|
||||
# 获取最后一条消息
|
||||
latest_message = recent_messages_dict[-1]
|
||||
# 在退出时更新全局字典时间戳(加1微秒防止重复)
|
||||
_CHAT_START_TIMES[self.chat_id] = latest_message['time'] + 0.000001 # 0.000001秒 = 1微秒
|
||||
|
||||
# 判定条件:累计3条消息或等待超过5秒且有新消息
|
||||
time_since_last_judge = current_time - last_judge_time
|
||||
should_judge, trigger_reason = self._should_trigger_judge(new_message_count, time_since_last_judge)
|
||||
return True, f"累计消息数量达到{new_message_count}条,结束等待 (等待时间: {elapsed_time:.1f}秒)"
|
||||
|
||||
if should_judge and time_since_last_judge >= min_judge_interval:
|
||||
logger.info(f"{self.log_prefix} 触发判定({trigger_reason}),进行智能判断...")
|
||||
|
||||
# 获取最近的消息内容用于判断
|
||||
recent_messages = message_api.get_messages_by_time_in_chat(
|
||||
chat_id=self.chat_id,
|
||||
start_time=start_time,
|
||||
end_time=current_time,
|
||||
)
|
||||
|
||||
if recent_messages:
|
||||
# 使用message_api构建可读的消息字符串
|
||||
messages_text = message_api.build_readable_messages(
|
||||
messages=recent_messages,
|
||||
timestamp_mode="normal_no_YMD",
|
||||
truncate=False,
|
||||
show_actions=False,
|
||||
# 3. 检查累计兴趣值
|
||||
if new_message_count > 0:
|
||||
accumulated_interest = 0.0
|
||||
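# 只累计带文本内容的新消息的兴趣值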
for msg_dict in recent_messages_dict:
|
||||
text = msg_dict.get("processed_plain_text", "")
|
||||
interest_value = msg_dict.get("interest_value", 0.0)
|
||||
if text:
|
||||
accumulated_interest += interest_value
|
||||
logger.info(f"{self.log_prefix} 当前累计兴趣值: {accumulated_interest:.2f}")
|
||||
if accumulated_interest >= self._interest_exit_threshold:
|
||||
logger.info(
|
||||
f"{self.log_prefix} 累计兴趣值达到{accumulated_interest:.2f}(>{self._interest_exit_threshold}),结束等待"
|
||||
)
|
||||
exit_reason = f"{global_config.bot.nickname}(你)感觉到了大家浓厚的兴趣(兴趣值{accumulated_interest:.1f}),决定重新加入讨论"
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=False,
|
||||
action_prompt_display=exit_reason,
|
||||
action_done=True,
|
||||
)
|
||||
|
||||
# 获取身份信息
|
||||
bot_name = global_config.bot.nickname
|
||||
bot_nickname = ""
|
||||
if global_config.bot.alias_names:
|
||||
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
|
||||
bot_core_personality = global_config.personality.personality_core
|
||||
identity_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}"
|
||||
# 获取最后一条消息
|
||||
latest_message = recent_messages_dict[-1]
|
||||
# 在退出时更新全局字典时间戳(加1微秒防止重复)
|
||||
_CHAT_START_TIMES[self.chat_id] = latest_message['time'] + 0.000001 # 0.000001秒 = 1微秒
|
||||
|
||||
# 构建判断历史字符串(最多显示3条)
|
||||
history_block = ""
|
||||
if judge_history:
|
||||
history_block = "之前的判断历史:\n"
|
||||
# 只取最近的3条历史记录
|
||||
recent_history = judge_history[-3:] if len(judge_history) > 3 else judge_history
|
||||
for i, (timestamp, judge_result, reason) in enumerate(recent_history, 1):
|
||||
elapsed_seconds = int(timestamp - start_time)
|
||||
history_block += f"{i}. 等待{elapsed_seconds}秒时判断:{judge_result},理由:{reason}\n"
|
||||
history_block += "\n"
|
||||
|
||||
# 检查过去10分钟的发言频率
|
||||
frequency_block, should_skip_llm_judge = self._get_fatigue_status(current_time)
|
||||
|
||||
# 如果决定跳过LLM判断,直接更新时间并继续等待
|
||||
if should_skip_llm_judge:
|
||||
logger.info(f"{self.log_prefix} 疲劳,继续等待。")
|
||||
last_judge_time = time.time() # 更新判断时间,避免立即重新判断
|
||||
start_time = current_time # 更新消息检查的起始时间,以避免重复判断
|
||||
continue # 跳过本次LLM判断,继续循环等待
|
||||
|
||||
# 构建判断上下文
|
||||
chat_context = "QQ群" if self.is_group else "私聊"
|
||||
judge_prompt = f"""
|
||||
{identity_block}
|
||||
|
||||
你现在正在{chat_context}参与聊天,以下是聊天内容:
|
||||
{context_str}
|
||||
在以上的聊天中,你选择了暂时不回复,现在,你看到了新的聊天消息如下:
|
||||
{messages_text}
|
||||
|
||||
{history_block}
|
||||
请注意:{frequency_block}
|
||||
请你判断,是否要结束不回复的状态,重新加入聊天讨论。
|
||||
|
||||
判断标准:
|
||||
1. 如果有人直接@你、提到你的名字或明确向你询问,应该回复
|
||||
2. 如果话题发生重要变化,需要你参与讨论,应该回复
|
||||
3. 如果只是普通闲聊、重复内容或与你无关的讨论,不需要回复
|
||||
4. 如果消息内容过于简单(如单纯的表情、"哈哈"等),不需要回复
|
||||
5. 参考之前的判断历史,如果情况有明显变化或持续等待时间过长,考虑调整判断
|
||||
|
||||
请用JSON格式回复你的判断,严格按照以下格式:
|
||||
{{
|
||||
"should_reply": true/false,
|
||||
"reason": "详细说明你的判断理由"
|
||||
}}
|
||||
"""
|
||||
|
||||
try:
|
||||
# 获取可用的模型配置
|
||||
available_models = llm_api.get_available_models()
|
||||
|
||||
# 使用 utils_small 模型
|
||||
small_model = getattr(available_models, "utils_small", None)
|
||||
|
||||
logger.debug(judge_prompt)
|
||||
|
||||
if small_model:
|
||||
# 使用小模型进行判断
|
||||
success, response, reasoning, model_name = await llm_api.generate_with_model(
|
||||
prompt=judge_prompt,
|
||||
model_config=small_model,
|
||||
request_type="plugin.no_reply_judge",
|
||||
temperature=0.7, # 进一步降低温度,提高JSON输出的一致性和准确性
|
||||
)
|
||||
|
||||
# 更新上次判断时间
|
||||
last_judge_time = time.time()
|
||||
|
||||
if success and response:
|
||||
response = response.strip()
|
||||
logger.debug(f"{self.log_prefix} 模型({model_name})原始JSON响应: {response}")
|
||||
|
||||
# 解析LLM的JSON响应,提取判断结果和理由
|
||||
judge_result, reason = self._parse_llm_judge_response(response)
|
||||
|
||||
if judge_result:
|
||||
logger.info(f"{self.log_prefix} 决定继续参与讨论,结束等待,原因: {reason}")
|
||||
else:
|
||||
logger.info(f"{self.log_prefix} 决定不参与讨论,继续等待,原因: {reason}")
|
||||
|
||||
# 将判断结果保存到历史中
|
||||
judge_history.append((current_time, judge_result, reason))
|
||||
|
||||
if judge_result == "需要回复":
|
||||
# logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")
|
||||
|
||||
full_prompt = f"{global_config.bot.nickname}(你)的想法是:{reason}"
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_prompt_display=full_prompt,
|
||||
action_done=True,
|
||||
)
|
||||
return True, f"检测到需要回复的消息,结束等待 (等待时间: {elapsed_time:.1f}秒)"
|
||||
else:
|
||||
logger.info(f"{self.log_prefix} 模型判断不需要回复,理由: {reason},继续等待")
|
||||
# 更新开始时间,避免重复判断同样的消息
|
||||
start_time = current_time
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 模型判断失败,继续等待")
|
||||
else:
|
||||
logger.warning(f"{self.log_prefix} 未找到可用的模型配置,继续等待")
|
||||
last_judge_time = time.time() # 即使失败也更新时间,避免频繁重试
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 模型判断异常: {e},继续等待")
|
||||
last_judge_time = time.time() # 异常时也更新时间,避免频繁重试
|
||||
return (
|
||||
True,
|
||||
f"累计兴趣值达到{accumulated_interest:.2f},结束等待 (等待时间: {elapsed_time:.1f}秒)",
|
||||
)
|
||||
|
||||
# 每10秒输出一次等待状态
|
||||
if elapsed_time < 60:
|
||||
if int(elapsed_time) % 10 == 0 and int(elapsed_time) > 0:
|
||||
logger.debug(f"{self.log_prefix} 已等待{elapsed_time:.0f}秒,等待新消息...")
|
||||
await asyncio.sleep(1)
|
||||
else:
|
||||
if int(elapsed_time) % 180 == 0 and int(elapsed_time) > 0:
|
||||
logger.info(f"{self.log_prefix} 已等待{elapsed_time / 60:.0f}分钟,等待新消息...")
|
||||
await asyncio.sleep(1)
|
||||
if int(elapsed_time) > 0 and int(elapsed_time) % 10 == 0:
|
||||
logger.debug(
|
||||
f"{self.log_prefix} 已等待{elapsed_time:.0f}秒,累计{new_message_count}条消息,继续等待..."
|
||||
)
|
||||
# 使用 asyncio.sleep(1) 来避免在同一秒内重复打印日志
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# 短暂等待后继续检查
|
||||
await asyncio.sleep(check_interval)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 不回复动作执行失败: {e}")
|
||||
# 即使执行失败也要记录
|
||||
exit_reason = f"执行异常: {str(e)}"
|
||||
full_prompt = f"{context_str}{exit_reason},你思考是否要进行回复"
|
||||
full_prompt = f"no_reply执行异常: {exit_reason},你思考是否要进行回复"
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=True,
|
||||
action_prompt_display=full_prompt,
|
||||
@@ -305,215 +181,6 @@ class NoReplyAction(BaseAction):
|
||||
)
|
||||
return False, f"不回复动作执行失败: {e}"
|
||||
|
||||
def _should_trigger_judge(self, new_message_count: int, time_since_last_judge: float) -> Tuple[bool, str]:
|
||||
"""判断是否应该触发智能判断,并返回触发原因。
|
||||
|
||||
Args:
|
||||
new_message_count: 新消息的数量。
|
||||
time_since_last_judge: 距离上次判断的时间。
|
||||
|
||||
Returns:
|
||||
一个元组 (should_judge, reason)。
|
||||
- should_judge: 一个布尔值,指示是否应该触发判断。
|
||||
- reason: 触发判断的原因字符串。
|
||||
"""
|
||||
# 判定条件:累计3条消息或等待超过15秒且有新消息
|
||||
should_judge_flag = new_message_count >= 3 or (new_message_count > 0 and time_since_last_judge >= 15.0)
|
||||
|
||||
if not should_judge_flag:
|
||||
return False, ""
|
||||
|
||||
# 判断触发原因
|
||||
if new_message_count >= 3:
|
||||
return True, f"累计{new_message_count}条消息"
|
||||
elif new_message_count > 0 and time_since_last_judge >= 15.0:
|
||||
return True, f"等待{time_since_last_judge:.1f}秒且有新消息"
|
||||
|
||||
return False, ""
|
||||
|
||||
def _get_fatigue_status(self, current_time: float) -> Tuple[str, bool]:
|
||||
"""
|
||||
根据最近的发言频率生成疲劳提示,并决定是否跳过判断。
|
||||
|
||||
Args:
|
||||
current_time: 当前时间戳。
|
||||
|
||||
Returns:
|
||||
一个元组 (frequency_block, should_skip_judge)。
|
||||
- frequency_block: 疲劳度相关的提示字符串。
|
||||
- should_skip_judge: 是否应该跳过LLM判断的布尔值。
|
||||
"""
|
||||
try:
|
||||
# 获取过去10分钟的所有消息
|
||||
past_10min_time = current_time - 600 # 10分钟前
|
||||
all_messages_10min = message_api.get_messages_by_time_in_chat(
|
||||
chat_id=self.chat_id,
|
||||
start_time=past_10min_time,
|
||||
end_time=current_time,
|
||||
)
|
||||
|
||||
# 手动过滤bot自己的消息
|
||||
bot_message_count = 0
|
||||
if all_messages_10min:
|
||||
user_id = global_config.bot.qq_account
|
||||
for message in all_messages_10min:
|
||||
sender_id = message.get("user_id", "")
|
||||
if sender_id == user_id:
|
||||
bot_message_count += 1
|
||||
|
||||
talk_frequency_threshold = global_config.chat.get_current_talk_frequency(self.chat_id) * 10
|
||||
|
||||
if bot_message_count > talk_frequency_threshold:
|
||||
over_count = bot_message_count - talk_frequency_threshold
|
||||
skip_probability = 0
|
||||
frequency_block = ""
|
||||
|
||||
if over_count <= 3:
|
||||
frequency_block = "你感觉稍微有些累,回复的有点多了。\n"
|
||||
elif over_count <= 5:
|
||||
frequency_block = "你今天说话比较多,感觉有点疲惫,想要稍微休息一下。\n"
|
||||
elif over_count <= 8:
|
||||
frequency_block = "你发现自己说话太多了,感觉很累,想要安静一会儿,除非有重要的事情否则不想回复。\n"
|
||||
skip_probability = self._skip_probability
|
||||
else:
|
||||
frequency_block = "你感觉非常累,想要安静一会儿。\n"
|
||||
skip_probability = 1
|
||||
|
||||
should_skip_judge = self._skip_judge_when_tired and random.random() < skip_probability
|
||||
|
||||
if should_skip_judge:
|
||||
logger.info(
|
||||
f"{self.log_prefix} 发言过多(超过{over_count}条),随机决定跳过此次LLM判断(概率{skip_probability * 100:.0f}%)"
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"{self.log_prefix} 过去10分钟发言{bot_message_count}条,超过阈值{talk_frequency_threshold},添加疲惫提示"
|
||||
)
|
||||
return frequency_block, should_skip_judge
|
||||
else:
|
||||
# 回复次数少时的正向提示
|
||||
under_count = talk_frequency_threshold - bot_message_count
|
||||
frequency_block = ""
|
||||
if under_count >= talk_frequency_threshold * 0.8:
|
||||
frequency_block = "你感觉精力充沛,状态很好,积极参与聊天。\n"
|
||||
elif under_count >= talk_frequency_threshold * 0.5:
|
||||
frequency_block = "你感觉状态不错。\n"
|
||||
|
||||
logger.info(
|
||||
f"{self.log_prefix} 过去10分钟发言{bot_message_count}条,未超过阈值{talk_frequency_threshold},添加正向提示"
|
||||
)
|
||||
return frequency_block, False
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"{self.log_prefix} 检查发言频率时出错: {e}")
|
||||
return "", False
|
||||
|
||||
def _check_no_activity_and_exit_focus(self, current_time: float) -> bool:
|
||||
"""检查过去10分钟是否完全没有发言,决定是否退出专注模式
|
||||
|
||||
Args:
|
||||
current_time: 当前时间戳
|
||||
|
||||
Returns:
|
||||
bool: 是否应该退出专注模式
|
||||
"""
|
||||
try:
|
||||
# 只在auto模式下进行检查
|
||||
if global_config.chat.chat_mode != "auto":
|
||||
return False
|
||||
|
||||
# 获取过去10分钟的所有消息
|
||||
past_10min_time = current_time - 600 # 10分钟前
|
||||
all_messages = message_api.get_messages_by_time_in_chat(
|
||||
chat_id=self.chat_id,
|
||||
start_time=past_10min_time,
|
||||
end_time=current_time,
|
||||
)
|
||||
|
||||
if not all_messages:
|
||||
# 如果完全没有消息,也不需要退出专注模式
|
||||
return False
|
||||
|
||||
# 统计bot自己的回复数量
|
||||
bot_message_count = 0
|
||||
user_id = global_config.bot.qq_account
|
||||
|
||||
for message in all_messages:
|
||||
sender_id = message.get("user_id", "")
|
||||
if sender_id == user_id:
|
||||
bot_message_count += 1
|
||||
|
||||
# 如果过去10分钟bot一条消息也没有发送,退出专注模式
|
||||
if bot_message_count == 0:
|
||||
logger.info(f"{self.log_prefix} 过去10分钟bot完全没有发言,准备退出专注模式")
|
||||
return True
|
||||
else:
|
||||
logger.debug(f"{self.log_prefix} 过去10分钟bot发言{bot_message_count}条,继续保持专注模式")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 检查无活动状态时出错: {e}")
|
||||
return False
|
||||
|
||||
def _parse_llm_judge_response(self, response: str) -> tuple[str, str]:
|
||||
"""解析LLM判断响应,使用JSON格式提取判断结果和理由
|
||||
|
||||
Args:
|
||||
response: LLM的原始JSON响应
|
||||
|
||||
Returns:
|
||||
tuple: (判断结果, 理由)
|
||||
"""
|
||||
try:
|
||||
# 使用repair_json修复可能有问题的JSON格式
|
||||
fixed_json_string = repair_json(response)
|
||||
logger.debug(f"{self.log_prefix} repair_json修复后的响应: {fixed_json_string}")
|
||||
|
||||
# 如果repair_json返回的是字符串,需要解析为Python对象
|
||||
if isinstance(fixed_json_string, str):
|
||||
result_json = json.loads(fixed_json_string)
|
||||
else:
|
||||
# 如果repair_json直接返回了字典对象,直接使用
|
||||
result_json = fixed_json_string
|
||||
|
||||
# 从JSON中提取判断结果和理由
|
||||
should_reply = result_json.get("should_reply", False)
|
||||
reason = result_json.get("reason", "无法获取判断理由")
|
||||
|
||||
# 转换布尔值为中文字符串
|
||||
judge_result = "需要回复" if should_reply else "不需要回复"
|
||||
|
||||
logger.debug(f"{self.log_prefix} JSON解析成功 - 判断: {judge_result}, 理由: {reason}")
|
||||
return judge_result, reason
|
||||
|
||||
except (json.JSONDecodeError, KeyError, TypeError) as e:
|
||||
logger.warning(f"{self.log_prefix} JSON解析失败,尝试文本解析: {e}")
|
||||
|
||||
# 如果JSON解析失败,回退到简单的关键词匹配
|
||||
try:
|
||||
response_lower = response.lower()
|
||||
|
||||
if "true" in response_lower or "需要回复" in response:
|
||||
judge_result = "需要回复"
|
||||
reason = "从响应文本中检测到需要回复的指示"
|
||||
elif "false" in response_lower or "不需要回复" in response:
|
||||
judge_result = "不需要回复"
|
||||
reason = "从响应文本中检测到不需要回复的指示"
|
||||
else:
|
||||
judge_result = "不需要回复" # 默认值
|
||||
reason = f"无法解析响应格式,使用默认判断。原始响应: {response[:100]}..."
|
||||
|
||||
logger.debug(f"{self.log_prefix} 文本解析结果 - 判断: {judge_result}, 理由: {reason}")
|
||||
return judge_result, reason
|
||||
|
||||
except Exception as fallback_e:
|
||||
logger.error(f"{self.log_prefix} 文本解析也失败: {fallback_e}")
|
||||
return "不需要回复", f"解析异常: {str(e)}, 回退解析也失败: {str(fallback_e)}"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"{self.log_prefix} 解析LLM响应时出错: {e}")
|
||||
return "不需要回复", f"解析异常: {str(e)}"
|
||||
|
||||
@classmethod
|
||||
def reset_consecutive_count(cls):
|
||||
"""重置连续计数器"""
|
||||
|
||||
@@ -9,6 +9,7 @@ import random
|
||||
import time
|
||||
from typing import List, Tuple, Type
|
||||
import asyncio
|
||||
import re
|
||||
|
||||
# 导入新插件系统
|
||||
from src.plugin_system import BasePlugin, register_plugin, BaseAction, ComponentInfo, ActionActivationType, ChatMode
|
||||
@@ -54,12 +55,26 @@ class ReplyAction(BaseAction):
|
||||
# 关联类型
|
||||
associated_types = ["text"]
|
||||
|
||||
def _parse_reply_target(self, target_message: str) -> tuple:
|
||||
sender = ""
|
||||
target = ""
|
||||
if ":" in target_message or ":" in target_message:
|
||||
# 使用正则表达式匹配中文或英文冒号
|
||||
parts = re.split(pattern=r"[::]", string=target_message, maxsplit=1)
|
||||
if len(parts) == 2:
|
||||
sender = parts[0].strip()
|
||||
target = parts[1].strip()
|
||||
return sender, target
|
||||
|
||||
async def execute(self) -> Tuple[bool, str]:
|
||||
"""执行回复动作"""
|
||||
logger.info(f"{self.log_prefix} 决定进行回复")
|
||||
|
||||
start_time = self.action_data.get("loop_start_time", time.time())
|
||||
|
||||
reply_to = self.action_data.get("reply_to", "")
|
||||
sender, target = self._parse_reply_target(reply_to)
|
||||
|
||||
try:
|
||||
try:
|
||||
success, reply_set = await asyncio.wait_for(
|
||||
@@ -105,6 +120,11 @@ class ReplyAction(BaseAction):
|
||||
reply_text += data
|
||||
|
||||
# 存储动作记录
|
||||
if sender and target:
|
||||
reply_text = f"你对{sender}说的{target},进行了回复:{reply_text}"
|
||||
else:
|
||||
reply_text = f"你进行发言:{reply_text}"
|
||||
|
||||
await self.store_action_info(
|
||||
action_build_into_prompt=False,
|
||||
action_prompt_display=reply_text,
|
||||
@@ -142,37 +162,18 @@ class CoreActionsPlugin(BasePlugin):
    config_section_descriptions = {
        "plugin": "插件启用配置",
        "components": "核心组件启用配置",
        "no_reply": "不回复动作配置(智能等待机制)",
    }

    # 配置Schema定义
    config_schema = {
        "plugin": {
            "enabled": ConfigField(type=bool, default=True, description="是否启用插件"),
            "config_version": ConfigField(type=str, default="0.3.1", description="配置文件版本"),
            "enabled": ConfigField(type=bool, default=False, description="是否启用插件"),
            "config_version": ConfigField(type=str, default="0.4.0", description="配置文件版本"),
        },
        "components": {
            "enable_reply": ConfigField(type=bool, default=True, description="是否启用'回复'动作"),
            "enable_no_reply": ConfigField(type=bool, default=True, description="是否启用'不回复'动作"),
            "enable_emoji": ConfigField(type=bool, default=True, description="是否启用'表情'动作"),
        },
        "no_reply": {
            "max_timeout": ConfigField(type=int, default=1200, description="最大等待超时时间(秒)"),
            "min_judge_interval": ConfigField(
                type=float, default=1.0, description="LLM判断的最小间隔时间(秒),防止过于频繁"
            ),
            "auto_exit_message_count": ConfigField(
                type=int, default=20, description="累计消息数量达到此阈值时自动结束等待"
            ),
            "random_probability": ConfigField(
                type=float, default=0.8, description="Focus模式下,随机选择不回复的概率(0.0到1.0)", example=0.8
            ),
            "skip_judge_when_tired": ConfigField(
                type=bool, default=True, description="当发言过多时是否启用跳过LLM判断机制"
            ),
            "frequency_check_window": ConfigField(
                type=int, default=600, description="回复频率检查窗口时间(秒)", example=600
            ),
            "enable_reply": ConfigField(type=bool, default=True, description="是否启用回复动作"),
            "enable_no_reply": ConfigField(type=bool, default=True, description="是否启用不回复动作"),
            "enable_emoji": ConfigField(type=bool, default=True, description="是否启用发送表情/图片动作"),
        },
    }
@@ -190,25 +191,6 @@ class CoreActionsPlugin(BasePlugin):
        EmojiAction.focus_activation_type = ActionActivationType.LLM_JUDGE
        EmojiAction.normal_activation_type = ActionActivationType.LLM_JUDGE

        no_reply_probability = self.get_config("no_reply.random_probability", 0.8)
        NoReplyAction.random_activation_probability = no_reply_probability

        min_judge_interval = self.get_config("no_reply.min_judge_interval", 1.0)
        NoReplyAction._min_judge_interval = min_judge_interval

        auto_exit_message_count = self.get_config("no_reply.auto_exit_message_count", 20)
        NoReplyAction._auto_exit_message_count = auto_exit_message_count

        max_timeout = self.get_config("no_reply.max_timeout", 600)
        NoReplyAction._max_timeout = max_timeout

        skip_judge_when_tired = self.get_config("no_reply.skip_judge_when_tired", True)
        NoReplyAction._skip_judge_when_tired = skip_judge_when_tired

        # 新增:频率检测相关配置
        frequency_check_window = self.get_config("no_reply.frequency_check_window", 600)
        NoReplyAction._frequency_check_window = frequency_check_window

        # --- 根据配置注册组件 ---
        components = []
        if self.get_config("components.enable_reply", True):
@@ -59,6 +59,11 @@ class TTSAction(BaseAction):
            # 发送TTS消息
            await self.send_custom(message_type="tts_text", content=processed_text)

            # 记录动作信息
            await self.store_action_info(
                action_build_into_prompt=True, action_prompt_display="已经发送了语音消息。", action_done=True
            )

            logger.info(f"{self.log_prefix} TTS动作执行成功,文本长度: {len(processed_text)}")
            return True, "TTS动作执行成功"
@@ -54,7 +54,7 @@ class SearchKnowledgeTool(BaseTool):
    @staticmethod
    def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
        """计算两个向量之间的余弦相似度"""
        dot_product = sum(p * q for p, q in zip(vec1, vec2))
        dot_product = sum(p * q for p, q in zip(vec1, vec2, strict=False))
        magnitude1 = math.sqrt(sum(p * p for p in vec1))
        magnitude2 = math.sqrt(sum(q * q for q in vec2))
        if magnitude1 == 0 or magnitude2 == 0:
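
The strict=False added to zip here keeps the long-standing truncate-to-the-shorter-input behaviour (it is the default) while satisfying linters that flag bare zip() calls; strict=True would instead raise when the two vectors differ in length. The strict keyword requires Python 3.10+. A small illustration:

a = [1.0, 2.0, 3.0]
b = [4.0, 5.0]

print(list(zip(a, b, strict=False)))  # [(1.0, 4.0), (2.0, 5.0)] -- extra element silently dropped
# list(zip(a, b, strict=True))        # ValueError: zip() argument 2 is shorter than argument 1
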
@@ -187,7 +187,11 @@ class ToolExecutor:
                tool_results.append(tool_info)

                logger.info(f"{self.log_prefix}工具{tool_name}执行成功,类型: {tool_info['type']}")
                logger.debug(f"{self.log_prefix}工具{tool_name}结果内容: {tool_info['content'][:200]}...")
                content = tool_info["content"]
                if not isinstance(content, (str, list, tuple)):
                    content = str(content)
                preview = content[:200]
                logger.debug(f"{self.log_prefix}工具{tool_name}结果内容: {preview}...")

            except Exception as e:
                logger.error(f"{self.log_prefix}工具{tool_name}执行失败: {e}")
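
The rewritten debug line above guards the preview: the old f-string sliced tool_info['content'] directly, which raises TypeError when a tool returns a dict or a number, while converting non-text, non-sequence results to str first makes the [:200] slice safe. A standalone sketch of that guard with hypothetical values:

def preview_content(content, limit: int = 200):
    # Fall back to str() for anything that cannot be sliced directly
    if not isinstance(content, (str, list, tuple)):
        content = str(content)
    return content[:limit]

print(preview_content({"answer": 42}))         # "{'answer': 42}"
print(preview_content("很长的工具输出 " * 100))  # first 200 characters only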