Merge branch 'dev' into plugin

This commit is contained in:
UnCLAS-Prommer
2025-07-12 10:02:05 +08:00
29 changed files with 3898 additions and 518 deletions

.gitignore vendored (+1 line)

@@ -317,4 +317,5 @@ run_pet.bat
!/plugins/take_picture_plugin
config.toml
interested_rates.txt


@@ -1,7 +1,58 @@
[project]
name = "MaiMaiBot"
version = "0.1.0"
description = "MaiMaiBot"
name = "MaiBot"
version = "0.8.1"
description = "MaiCore 是一个基于大语言模型的可交互智能体"
requires-python = ">=3.10"
dependencies = [
"aiohttp>=3.12.14",
"apscheduler>=3.11.0",
"colorama>=0.4.6",
"cryptography>=45.0.5",
"customtkinter>=5.2.2",
"dotenv>=0.9.9",
"faiss-cpu>=1.11.0",
"fastapi>=0.116.0",
"jieba>=0.42.1",
"json-repair>=0.47.6",
"jsonlines>=4.0.0",
"maim-message>=0.3.8",
"matplotlib>=3.10.3",
"networkx>=3.4.2",
"numpy>=2.2.6",
"openai>=1.95.0",
"packaging>=25.0",
"pandas>=2.3.1",
"peewee>=3.18.2",
"pillow>=11.3.0",
"psutil>=7.0.0",
"pyarrow>=20.0.0",
"pydantic>=2.11.7",
"pymongo>=4.13.2",
"pypinyin>=0.54.0",
"python-dateutil>=2.9.0.post0",
"python-dotenv>=1.1.1",
"python-igraph>=0.11.9",
"quick-algo>=0.1.3",
"reportportal-client>=5.6.5",
"requests>=2.32.4",
"rich>=14.0.0",
"ruff>=0.12.2",
"scikit-learn>=1.7.0",
"scipy>=1.15.3",
"seaborn>=0.13.2",
"setuptools>=80.9.0",
"strawberry-graphql[fastapi]>=0.275.5",
"structlog>=25.4.0",
"toml>=0.10.2",
"tomli>=2.2.1",
"tomli-w>=1.2.0",
"tomlkit>=0.13.3",
"tqdm>=4.67.1",
"urllib3>=2.5.0",
"uvicorn>=0.35.0",
"websockets>=15.0.1",
]
[tool.ruff]

requirements.lock (new file, +271 lines)

@@ -0,0 +1,271 @@
# This file was autogenerated by uv via the following command:
# uv pip compile requirements.txt -o requirements.lock
aenum==3.1.16
# via reportportal-client
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.12.14
# via
# -r requirements.txt
# maim-message
# reportportal-client
aiosignal==1.4.0
# via aiohttp
annotated-types==0.7.0
# via pydantic
anyio==4.9.0
# via
# httpx
# openai
# starlette
apscheduler==3.11.0
# via -r requirements.txt
attrs==25.3.0
# via
# aiohttp
# jsonlines
certifi==2025.7.9
# via
# httpcore
# httpx
# reportportal-client
# requests
cffi==1.17.1
# via cryptography
charset-normalizer==3.4.2
# via requests
click==8.2.1
# via uvicorn
colorama==0.4.6
# via
# -r requirements.txt
# click
# tqdm
contourpy==1.3.2
# via matplotlib
cryptography==45.0.5
# via
# -r requirements.txt
# maim-message
customtkinter==5.2.2
# via -r requirements.txt
cycler==0.12.1
# via matplotlib
darkdetect==0.8.0
# via customtkinter
distro==1.9.0
# via openai
dnspython==2.7.0
# via pymongo
dotenv==0.9.9
# via -r requirements.txt
faiss-cpu==1.11.0
# via -r requirements.txt
fastapi==0.116.0
# via
# -r requirements.txt
# maim-message
# strawberry-graphql
fonttools==4.58.5
# via matplotlib
frozenlist==1.7.0
# via
# aiohttp
# aiosignal
graphql-core==3.2.6
# via strawberry-graphql
h11==0.16.0
# via
# httpcore
# uvicorn
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via openai
idna==3.10
# via
# anyio
# httpx
# requests
# yarl
igraph==0.11.9
# via python-igraph
jieba==0.42.1
# via -r requirements.txt
jiter==0.10.0
# via openai
joblib==1.5.1
# via scikit-learn
json-repair==0.47.6
# via -r requirements.txt
jsonlines==4.0.0
# via -r requirements.txt
kiwisolver==1.4.8
# via matplotlib
maim-message==0.3.8
# via -r requirements.txt
markdown-it-py==3.0.0
# via rich
matplotlib==3.10.3
# via
# -r requirements.txt
# seaborn
mdurl==0.1.2
# via markdown-it-py
multidict==6.6.3
# via
# aiohttp
# yarl
networkx==3.5
# via -r requirements.txt
numpy==2.3.1
# via
# -r requirements.txt
# contourpy
# faiss-cpu
# matplotlib
# pandas
# scikit-learn
# scipy
# seaborn
openai==1.95.0
# via -r requirements.txt
packaging==25.0
# via
# -r requirements.txt
# customtkinter
# faiss-cpu
# matplotlib
# strawberry-graphql
pandas==2.3.1
# via
# -r requirements.txt
# seaborn
peewee==3.18.2
# via -r requirements.txt
pillow==11.3.0
# via
# -r requirements.txt
# matplotlib
propcache==0.3.2
# via
# aiohttp
# yarl
psutil==7.0.0
# via -r requirements.txt
pyarrow==20.0.0
# via -r requirements.txt
pycparser==2.22
# via cffi
pydantic==2.11.7
# via
# -r requirements.txt
# fastapi
# maim-message
# openai
pydantic-core==2.33.2
# via pydantic
pygments==2.19.2
# via rich
pymongo==4.13.2
# via -r requirements.txt
pyparsing==3.2.3
# via matplotlib
pypinyin==0.54.0
# via -r requirements.txt
python-dateutil==2.9.0.post0
# via
# -r requirements.txt
# matplotlib
# pandas
# strawberry-graphql
python-dotenv==1.1.1
# via
# -r requirements.txt
# dotenv
python-igraph==0.11.9
# via -r requirements.txt
python-multipart==0.0.20
# via strawberry-graphql
pytz==2025.2
# via pandas
quick-algo==0.1.3
# via -r requirements.txt
reportportal-client==5.6.5
# via -r requirements.txt
requests==2.32.4
# via
# -r requirements.txt
# reportportal-client
rich==14.0.0
# via -r requirements.txt
ruff==0.12.2
# via -r requirements.txt
scikit-learn==1.7.0
# via -r requirements.txt
scipy==1.16.0
# via
# -r requirements.txt
# scikit-learn
seaborn==0.13.2
# via -r requirements.txt
setuptools==80.9.0
# via -r requirements.txt
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# openai
starlette==0.46.2
# via fastapi
strawberry-graphql==0.275.5
# via -r requirements.txt
structlog==25.4.0
# via -r requirements.txt
texttable==1.7.0
# via igraph
threadpoolctl==3.6.0
# via scikit-learn
toml==0.10.2
# via -r requirements.txt
tomli==2.2.1
# via -r requirements.txt
tomli-w==1.2.0
# via -r requirements.txt
tomlkit==0.13.3
# via -r requirements.txt
tqdm==4.67.1
# via
# -r requirements.txt
# openai
typing-extensions==4.14.1
# via
# fastapi
# openai
# pydantic
# pydantic-core
# strawberry-graphql
# typing-inspection
typing-inspection==0.4.1
# via pydantic
tzdata==2025.2
# via
# pandas
# tzlocal
tzlocal==5.3.1
# via apscheduler
urllib3==2.5.0
# via
# -r requirements.txt
# requests
uvicorn==0.35.0
# via
# -r requirements.txt
# maim-message
websockets==15.0.1
# via
# -r requirements.txt
# maim-message
yarl==1.20.1
# via aiohttp


@@ -58,7 +58,9 @@ def hash_deduplicate(
# Save the deduplicated triples
new_triple_list_data = {}
for _, (raw_paragraph, triple_list) in enumerate(zip(raw_paragraphs.values(), triple_list_data.values())):
for _, (raw_paragraph, triple_list) in enumerate(
zip(raw_paragraphs.values(), triple_list_data.values(), strict=False)
):
# Paragraph hash
paragraph_hash = get_sha256(raw_paragraph)
if f"{PG_NAMESPACE}-{paragraph_hash}" in stored_pg_hashes and paragraph_hash in stored_paragraph_hashes:


@@ -174,7 +174,7 @@ def main(): # sourcery skip: comprehension-to-generator, extract-method
with ThreadPoolExecutor(max_workers=workers) as executor:
future_to_hash = {
executor.submit(process_single_text, pg_hash, raw_data, llm_client_list): pg_hash
for pg_hash, raw_data in zip(all_sha256_list, all_raw_datas)
for pg_hash, raw_data in zip(all_sha256_list, all_raw_datas, strict=False)
}
with Progress(


@@ -354,7 +354,7 @@ class VirtualLogDisplay:
# Apply the correct tag to each part
current_len = 0
for part, tag_name in zip(parts, tags):
for part, tag_name in zip(parts, tags, strict=False):
start_index = f"{start_pos}+{current_len}c"
end_index = f"{start_pos}+{current_len + len(part)}c"
self.text_widget.tag_add(tag_name, start_index, end_index)


@@ -122,7 +122,7 @@ class ExpressionLearner:
min_len = min(len(s1), len(s2))
if min_len < 5:
return False
same = sum(a == b for a, b in zip(s1, s2))
same = sum(a == b for a, b in zip(s1, s2, strict=False))
return same / min_len > 0.8
async def learn_and_store_expression(self) -> Tuple[List[Tuple[str, str, str]], List[Tuple[str, str, str]]]:


@@ -50,13 +50,12 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
is_mentioned, _ = is_mentioned_bot_in_message(message)
interested_rate = 0.0
if global_config.memory.enable_memory:
with Timer("记忆激活"):
interested_rate = await hippocampus_manager.get_activate_from_text(
message.processed_plain_text,
fast_retrieval=True,
)
logger.debug(f"记忆激活率: {interested_rate:.2f}")
with Timer("记忆激活"):
interested_rate = await hippocampus_manager.get_activate_from_text(
message.processed_plain_text,
fast_retrieval=False,
)
logger.debug(f"记忆激活率: {interested_rate:.2f}")
text_len = len(message.processed_plain_text)
# Adjust interest by text length: longer text means higher interest with diminishing growth, min 0.01, max 0.05
@@ -99,13 +98,15 @@ class HeartFCMessageReceiver:
userinfo = message.message_info.user_info
chat = message.chat_stream
# 2. Interest calculation and update
interested_rate, is_mentioned = await _calculate_interest(message)
message.interest_value = interested_rate
await self.storage.store_message(message, chat)
subheartflow = await heartflow.get_or_create_subheartflow(chat.stream_id)
# 2. Interest calculation and update
interested_rate, is_mentioned = await _calculate_interest(message)
subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned) # type: ignore
subheartflow.add_message_to_normal_chat_cache(message, interested_rate, is_mentioned)
chat_mood = mood_manager.get_mood_by_chat_id(subheartflow.chat_id) # type: ignore
asyncio.create_task(chat_mood.update_mood_by_message(message, interested_rate))


@@ -59,7 +59,7 @@ EMBEDDING_SIM_THRESHOLD = 0.99
def cosine_similarity(a, b):
# Compute cosine similarity
dot = sum(x * y for x, y in zip(a, b))
dot = sum(x * y for x, y in zip(a, b, strict=False))
norm_a = math.sqrt(sum(x * x for x in a))
norm_b = math.sqrt(sum(x * x for x in b))
if norm_a == 0 or norm_b == 0:
@@ -285,7 +285,7 @@ class EmbeddingStore:
distances = list(distances.flatten())
result = [
(self.idx2hash[str(int(idx))], float(sim))
for (idx, sim) in zip(indices, distances)
for (idx, sim) in zip(indices, distances, strict=False)
if idx in range(len(self.idx2hash))
]
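For intuition, the patched helper computes the standard cosine similarity dot(a, b) / (|a| |b|); a standalone sketch with the truncated tail filled in (the final return line is an assumption, the zero-norm guard is visible above):

import math

def cosine_similarity(a, b):
    # Pairwise products; strict=False mirrors the patched call
    dot = sum(x * y for x, y in zip(a, b, strict=False))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(x * x for x in b))
    if norm_a == 0 or norm_b == 0:
        return 0.0
    return dot / (norm_a * norm_b)

# Parallel vectors score 1.0, orthogonal vectors 0.0
assert abs(cosine_similarity([1.0, 2.0], [2.0, 4.0]) - 1.0) < 1e-9
assert abs(cosine_similarity([1.0, 0.0], [0.0, 1.0])) < 1e-9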


@@ -811,7 +811,7 @@ class EntorhinalCortex:
timestamps = sample_scheduler.get_timestamp_array()
# Use translate_timestamp_to_human_readable with mode="normal"
readable_timestamps = [translate_timestamp_to_human_readable(ts, mode="normal") for ts in timestamps]
for _, readable_timestamp in zip(timestamps, readable_timestamps):
for _, readable_timestamp in zip(timestamps, readable_timestamps, strict=False):
logger.debug(f"回忆往事: {readable_timestamp}")
chat_samples = []
for timestamp in timestamps:

View File

@@ -112,6 +112,7 @@ class MessageRecv(Message):
self.is_mentioned = None
self.priority_mode = "interest"
self.priority_info = None
self.interest_value = None
def update_chat_stream(self, chat_stream: "ChatStream"):
self.chat_stream = chat_stream
@@ -336,6 +337,8 @@ class MessageSending(MessageProcessBase):
# Used when the sent content differs from what is displayed
self.display_message = display_message
self.interest_value = 0.0
def build_reply(self):
"""设置回复消息"""
if self.reply:


@@ -1,4 +1,5 @@
import re
import traceback
from typing import Union
from src.common.database.database_model import Messages, RecalledMessages, Images
@@ -35,11 +36,11 @@ class MessageStorage:
filtered_display_message = re.sub(pattern, "", display_message, flags=re.DOTALL)
else:
filtered_display_message = ""
interest_value = 0
reply_to = message.reply_to
else:
filtered_display_message = ""
interest_value = message.interest_value
reply_to = ""
chat_info_dict = chat_stream.to_dict()
@@ -79,9 +80,11 @@ class MessageStorage:
processed_plain_text=filtered_processed_plain_text,
display_message=filtered_display_message,
memorized_times=message.memorized_times,
interest_value=interest_value,
)
except Exception:
logger.exception("存储消息失败")
traceback.print_exc()
@staticmethod
async def store_recalled_message(message_id: str, time: str, chat_stream: ChatStream) -> None:


@@ -273,7 +273,7 @@ class ActionModifier:
task_results = await asyncio.gather(*tasks, return_exceptions=True)
# Process the results and update the cache
for action_name, result in zip(task_names, task_results):
for action_name, result in zip(task_names, task_results, strict=False):
if isinstance(result, Exception):
logger.error(f"{self.log_prefix}LLM判定action {action_name} 时出错: {result}")
results[action_name] = False


@@ -10,7 +10,12 @@ from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.common.logger import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.chat.utils.chat_message_builder import (
build_readable_actions,
get_actions_by_timestamp_with_chat,
build_readable_messages,
get_raw_msg_before_timestamp_with_chat,
)
from src.chat.utils.utils import get_chat_type_and_target_info
from src.chat.planner_actions.action_manager import ActionManager
from src.chat.message_receive.chat_stream import get_chat_manager
@@ -30,9 +35,14 @@ def init_prompt():
你现在需要根据聊天内容选择合适的action来参与聊天。
{chat_context_description},以下是具体的聊天内容:
{chat_content_block}
{moderation_prompt}
现在请你根据{by_what}选择合适的action:
你刚刚选择并执行过的action是
{actions_before_now_block}
{no_action_block}
{action_options_text}
@@ -226,6 +236,16 @@ class ActionPlanner:
show_actions=True,
)
actions_before_now = get_actions_by_timestamp_with_chat(
chat_id=self.chat_id,
timestamp_end=time.time(),
limit=5,
)
actions_before_now_block = build_readable_actions(
actions=actions_before_now,
)
self.last_obs_time_mark = time.time()
if self.mode == ChatMode.FOCUS:
@@ -289,6 +309,7 @@ class ActionPlanner:
by_what=by_what,
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
no_action_block=no_action_block,
action_options_text=action_options_block,
moderation_prompt=moderation_prompt_block,


@@ -957,7 +957,7 @@ def weighted_sample_no_replacement(items, weights, k) -> list:
2. The same element is never selected twice
"""
selected = []
pool = list(zip(items, weights))
pool = list(zip(items, weights, strict=False))
for _ in range(min(k, len(pool))):
total = sum(w for _, w in pool)
r = random.uniform(0, total)
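The hunk stops just after drawing r; a complete standalone sketch of the same weighted-sampling-without-replacement technique (the cumulative scan after r is an assumption about the elided body):

import random

def weighted_sample_no_replacement(items, weights, k) -> list:
    # Pair items with weights; strict=False mirrors the patched line
    pool = list(zip(items, weights, strict=False))
    selected = []
    for _ in range(min(k, len(pool))):
        total = sum(w for _, w in pool)
        r = random.uniform(0, total)
        cumulative = 0.0
        for i, (item, w) in enumerate(pool):
            cumulative += w
            if r <= cumulative:
                selected.append(item)
                pool.pop(i)  # remove the winner so it cannot repeat
                break
    return selected

print(weighted_sample_no_replacement(["a", "b", "c"], [1, 5, 1], k=2))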


@@ -78,6 +78,60 @@ def get_raw_msg_by_timestamp_with_chat_users(
return find_messages(message_filter=filter_query, sort=sort_order, limit=limit, limit_mode=limit_mode)
def get_actions_by_timestamp_with_chat(
chat_id: str,
timestamp_start: float = 0,
timestamp_end: float = time.time(),  # NOTE: this default is evaluated once, at import time
limit: int = 0,
limit_mode: str = "latest",
) -> List[Dict[str, Any]]:
"""获取在特定聊天从指定时间戳到指定时间戳的动作记录,按时间升序排序,返回动作记录列表"""
query = ActionRecords.select().where(
(ActionRecords.chat_id == chat_id)
& (ActionRecords.time > timestamp_start)
& (ActionRecords.time < timestamp_end)
)
if limit > 0:
if limit_mode == "latest":
query = query.order_by(ActionRecords.time.desc()).limit(limit)
# Reverse after fetching so the final output stays in ascending time order
actions = list(query)
return [action.__data__ for action in reversed(actions)]
else: # earliest
query = query.order_by(ActionRecords.time.asc()).limit(limit)
else:
query = query.order_by(ActionRecords.time.asc())
actions = list(query)
return [action.__data__ for action in actions]
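The "latest" branch above queries newest-first (DESC plus LIMIT) and then reverses, so callers always receive ascending time regardless of limit_mode; the same invariant in miniature (standalone sketch, plain floats standing in for ActionRecords.time):

records_time = [3.0, 1.0, 2.0]  # stand-ins for ActionRecords.time values
latest_two = sorted(records_time, reverse=True)[:2]  # ORDER BY time DESC LIMIT 2
ascending = list(reversed(latest_two))  # reversed back, as the function does
assert ascending == [2.0, 3.0]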
def get_actions_by_timestamp_with_chat_inclusive(
chat_id: str, timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
) -> List[Dict[str, Any]]:
"""获取在特定聊天从指定时间戳到指定时间戳的动作记录(包含边界),按时间升序排序,返回动作记录列表"""
query = ActionRecords.select().where(
(ActionRecords.chat_id == chat_id)
& (ActionRecords.time >= timestamp_start)
& (ActionRecords.time <= timestamp_end)
)
if limit > 0:
if limit_mode == "latest":
query = query.order_by(ActionRecords.time.desc()).limit(limit)
# Reverse after fetching so the final output stays in ascending time order
actions = list(query)
return [action.__data__ for action in reversed(actions)]
else: # earliest
query = query.order_by(ActionRecords.time.asc()).limit(limit)
else:
query = query.order_by(ActionRecords.time.asc())
actions = list(query)
return [action.__data__ for action in actions]
def get_raw_msg_by_timestamp_random(
timestamp_start: float, timestamp_end: float, limit: int = 0, limit_mode: str = "latest"
) -> List[Dict[str, Any]]:
@@ -502,6 +556,45 @@ def build_pic_mapping_info(pic_id_mapping: Dict[str, str]) -> str:
return "\n".join(mapping_lines)
def build_readable_actions(actions: List[Dict[str, Any]]) -> str:
"""
Convert an action list into readable text.
Format: (N) minutes ago, you used (action_name); the details were action_prompt_display
Args:
actions: a list of action record dicts.
Returns:
The formatted action string.
"""
if not actions:
return ""
output_lines = []
current_time = time.time()
# The get functions return actions sorted ascending by time; the newest-first reversal below is currently disabled.
# sorted_actions = sorted(actions, key=lambda x: x.get("time", 0), reverse=True)
for action in actions:
action_time = action.get("time", current_time)
action_name = action.get("action_name", "未知动作")
action_prompt_display = action.get("action_prompt_display", "无具体内容")
time_diff_seconds = current_time - action_time
if time_diff_seconds < 60:
time_ago_str = f"{int(time_diff_seconds)}秒前"
else:
time_diff_minutes = round(time_diff_seconds / 60)
time_ago_str = f"{int(time_diff_minutes)}分钟前"
line = f"{time_ago_str},你使用了“{action_name}”,具体内容是:“{action_prompt_display}”"
output_lines.append(line)
return "\n".join(output_lines)
async def build_readable_messages_with_list(
messages: List[Dict[str, Any]],
replace_bot_name: bool = True,


@@ -363,7 +363,7 @@ class ChineseTypoGenerator:
else:
# Handle single-character substitutions inside multi-character words
word_result = []
for _, (char, py) in enumerate(zip(word, word_pinyin)):
for _, (char, py) in enumerate(zip(word, word_pinyin, strict=False)):
# Characters inside a word get a lower substitution probability
word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))


@@ -129,6 +129,8 @@ class Messages(BaseModel):
reply_to = TextField(null=True)
interest_value = DoubleField(null=True)
# Fields flattened from chat_info
chat_info_stream_id = TextField()
chat_info_platform = TextField()


@@ -94,7 +94,7 @@ class ConfigBase:
raise TypeError(
f"Expected {len(field_type_args)} items for {field_type.__name__}, got {len(value)}"
)
return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args))
return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args, strict=False))
if field_origin_type is dict:
# Check whether the provided value is a dict


@@ -0,0 +1,242 @@
import json
import time
from src.chat.message_receive.message import MessageRecv
from src.llm_models.utils_model import LLMRequest
from src.common.logger import get_logger
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
from src.config.config import global_config
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.manager.async_task_manager import AsyncTask, async_task_manager
from json_repair import repair_json
logger = get_logger("action")
def init_prompt():
Prompt(
"""
{chat_talking_prompt}
以上是群里正在进行的聊天记录
{indentify_block}
你现在的动作状态是:
- 手部:{hand_action}
- 上半身:{upper_body_action}
- 头部:{head_action}
现在,因为你发送了消息,或者群里其他人发送了消息,引起了你的注意,你对其进行了阅读和思考,请你更新你的动作状态。
请只按照以下json格式输出描述你新的动作状态每个动作一到三个中文词确保每个字段都存在
{{
"hand_action": "...",
"upper_body_action": "...",
"head_action": "..."
}}
""",
"change_action_prompt",
)
Prompt(
"""
{chat_talking_prompt}
以上是群里最近的聊天记录
{indentify_block}
你之前的动作状态是:
- 手部:{hand_action}
- 上半身:{upper_body_action}
- 头部:{head_action}
距离你上次关注群里消息已经过去了一段时间,你冷静了下来,你的动作会趋于平缓或静止,请你输出你现在新的动作状态,用中文。
请只按照以下json格式输出描述你新的动作状态每个动作一到三个词确保每个字段都存在
{{
"hand_action": "...",
"upper_body_action": "...",
"head_action": "..."
}}
""",
"regress_action_prompt",
)
class ChatAction:
def __init__(self, chat_id: str):
self.chat_id: str = chat_id
self.hand_action: str = "双手放在桌面"
self.upper_body_action: str = "坐着"
self.head_action: str = "注视摄像机"
self.regression_count: int = 0
self.action_model = LLMRequest(
model=global_config.model.emotion,
temperature=0.7,
request_type="action",
)
self.last_change_time = 0
async def update_action_by_message(self, message: MessageRecv):
self.regression_count = 0
message_time = message.message_info.time
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
chat_id=self.chat_id,
timestamp_start=self.last_change_time,
timestamp_end=message_time,
limit=15,
limit_mode="last",
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
prompt_personality = global_config.personality.personality_core
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
prompt = await global_prompt_manager.format_prompt(
"change_action_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
hand_action=self.hand_action,
upper_body_action=self.upper_body_action,
head_action=self.head_action,
)
logger.info(f"prompt: {prompt}")
response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
logger.info(f"response: {response}")
logger.info(f"reasoning_content: {reasoning_content}")
action_data = json.loads(repair_json(response))
if action_data:
self.hand_action = action_data.get("hand_action", self.hand_action)
self.upper_body_action = action_data.get("upper_body_action", self.upper_body_action)
self.head_action = action_data.get("head_action", self.head_action)
self.last_change_time = message_time
async def regress_action(self):
message_time = time.time()
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
chat_id=self.chat_id,
timestamp_start=self.last_change_time,
timestamp_end=message_time,
limit=15,
limit_mode="last",
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
prompt_personality = global_config.personality.personality_core
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
prompt = await global_prompt_manager.format_prompt(
"regress_action_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
hand_action=self.hand_action,
upper_body_action=self.upper_body_action,
head_action=self.head_action,
)
logger.info(f"prompt: {prompt}")
response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
logger.info(f"response: {response}")
logger.info(f"reasoning_content: {reasoning_content}")
action_data = json.loads(repair_json(response))
if action_data:
self.hand_action = action_data.get("hand_action", self.hand_action)
self.upper_body_action = action_data.get("upper_body_action", self.upper_body_action)
self.head_action = action_data.get("head_action", self.head_action)
self.regression_count += 1
class ActionRegressionTask(AsyncTask):
def __init__(self, action_manager: "ActionManager"):
super().__init__(task_name="ActionRegressionTask", run_interval=30)
self.action_manager = action_manager
async def run(self):
logger.debug("Running action regression task...")
now = time.time()
for action_state in self.action_manager.action_state_list:
if action_state.last_change_time == 0:
continue
if now - action_state.last_change_time > 180:
if action_state.regression_count >= 3:
continue
logger.info(f"chat {action_state.chat_id} 开始动作回归, 这是第 {action_state.regression_count + 1}")
await action_state.regress_action()
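The run loop above applies three gates before regressing a chat; the same logic as a pure predicate (a sketch for clarity, not code from the commit; the 180-second idle window and 3-step cap come from the constants above):

def should_regress(last_change_time: float, regression_count: int, now: float) -> bool:
    # A chat that has never changed is never regressed
    if last_change_time == 0:
        return False
    # Regress only after 180 idle seconds
    if now - last_change_time <= 180:
        return False
    # And at most 3 regression steps per burst of activity
    return regression_count < 3

assert should_regress(last_change_time=100.0, regression_count=0, now=400.0)
assert not should_regress(last_change_time=0, regression_count=0, now=400.0)
assert not should_regress(last_change_time=100.0, regression_count=3, now=400.0)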
class ActionManager:
def __init__(self):
self.action_state_list: list[ChatAction] = []
"""当前动作状态"""
self.task_started: bool = False
async def start(self):
"""启动动作回归后台任务"""
if self.task_started:
return
logger.info("启动动作回归任务...")
task = ActionRegressionTask(self)
await async_task_manager.add_task(task)
self.task_started = True
logger.info("动作回归任务已启动")
def get_action_state_by_chat_id(self, chat_id: str) -> ChatAction:
for action_state in self.action_state_list:
if action_state.chat_id == chat_id:
return action_state
new_action_state = ChatAction(chat_id)
self.action_state_list.append(new_action_state)
return new_action_state
def reset_action_state_by_chat_id(self, chat_id: str):
for action_state in self.action_state_list:
if action_state.chat_id == chat_id:
action_state.hand_action = "双手放在桌面"
action_state.upper_body_action = "坐着"
action_state.head_action = "注视摄像机"
action_state.regression_count = 0
return
self.action_state_list.append(ChatAction(chat_id))
init_prompt()
action_manager = ActionManager()
"""全局动作管理器"""


@@ -0,0 +1,363 @@
import asyncio
import json
import time
from src.chat.message_receive.message import MessageRecv
from src.llm_models.utils_model import LLMRequest
from src.common.logger import get_logger
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
from src.config.config import global_config
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.manager.async_task_manager import AsyncTask, async_task_manager
from src.plugin_system.apis import send_api
logger = get_logger("mood")
async def send_joy_action(chat_id: str):
action_content = {"action": "Joy_eye", "data": 1.0}
await send_api.custom_to_stream(message_type="face_emotion", content=action_content, stream_id=chat_id)
logger.info(f"[{chat_id}] 已发送 Joy 动作: {action_content}")
await asyncio.sleep(5.0)
end_action_content = {"action": "Joy_eye", "data": 0.0}
await send_api.custom_to_stream(message_type="face_emotion", content=end_action_content, stream_id=chat_id)
logger.info(f"[{chat_id}] 已发送 Joy 结束动作: {end_action_content}")
def init_prompt():
Prompt(
"""
{chat_talking_prompt}
以上是直播间里正在进行的对话
{indentify_block}
你刚刚的情绪状态是:{mood_state}
现在,发送了消息,引起了你的注意,你对其进行了阅读和思考,请你输出一句话描述你新的情绪状态,不要输出任何其他内容
请只输出情绪状态,不要输出其他内容:
""",
"change_mood_prompt",
)
Prompt(
"""
{chat_talking_prompt}
以上是直播间里最近的对话
{indentify_block}
你之前的情绪状态是:{mood_state}
距离你上次关注直播间消息已经过去了一段时间,你冷静了下来,请你输出一句话描述你现在的情绪状态
请只输出情绪状态,不要输出其他内容:
""",
"regress_mood_prompt",
)
Prompt(
"""
{chat_talking_prompt}
以上是直播间里正在进行的对话
{indentify_block}
你刚刚的情绪状态是:{mood_state}
具体来说从1-10分你的情绪状态是
喜(Joy): {joy}
怒(Anger): {anger}
哀(Sorrow): {sorrow}
乐(Pleasure): {pleasure}
惧(Fear): {fear}
现在,发送了消息,引起了你的注意,你对其进行了阅读和思考。请基于对话内容,评估你新的情绪状态。
请以JSON格式输出你新的情绪状态包含“喜怒哀乐惧”五个维度每个维度的取值范围为1-10。
键值请使用英文: "joy", "anger", "sorrow", "pleasure", "fear".
例如: {{"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}}
不要输出任何其他内容只输出JSON。
""",
"change_mood_numerical_prompt",
)
Prompt(
"""
{chat_talking_prompt}
以上是直播间里最近的对话
{indentify_block}
你之前的情绪状态是:{mood_state}
具体来说从1-10分你的情绪状态是
喜(Joy): {joy}
怒(Anger): {anger}
哀(Sorrow): {sorrow}
乐(Pleasure): {pleasure}
惧(Fear): {fear}
距离你上次关注直播间消息已经过去了一段时间,你冷静了下来。请基于此,评估你现在的情绪状态。
请以JSON格式输出你新的情绪状态包含“喜怒哀乐惧”五个维度每个维度的取值范围为1-10。
键值请使用英文: "joy", "anger", "sorrow", "pleasure", "fear".
例如: {{"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}}
不要输出任何其他内容只输出JSON。
""",
"regress_mood_numerical_prompt",
)
class ChatMood:
def __init__(self, chat_id: str):
self.chat_id: str = chat_id
self.mood_state: str = "感觉很平静"
self.mood_values: dict[str, int] = {"joy": 5, "anger": 1, "sorrow": 1, "pleasure": 5, "fear": 1}
self.regression_count: int = 0
self.mood_model = LLMRequest(
model=global_config.model.emotion,
temperature=0.7,
request_type="mood_text",
)
self.mood_model_numerical = LLMRequest(
model=global_config.model.emotion,
temperature=0.4,
request_type="mood_numerical",
)
self.last_change_time = 0
def _parse_numerical_mood(self, response: str) -> dict[str, int] | None:
try:
# The LLM might output markdown with json inside
if "```json" in response:
response = response.split("```json")[1].split("```")[0]
elif "```" in response:
response = response.split("```")[1].split("```")[0]
data = json.loads(response)
# Validate
required_keys = {"joy", "anger", "sorrow", "pleasure", "fear"}
if not required_keys.issubset(data.keys()):
logger.warning(f"Numerical mood response missing keys: {response}")
return None
for key in required_keys:
value = data[key]
if not isinstance(value, int) or not (1 <= value <= 10):
logger.warning(f"Numerical mood response invalid value for {key}: {value} in {response}")
return None
return {key: data[key] for key in required_keys}
except json.JSONDecodeError:
logger.warning(f"Failed to parse numerical mood JSON: {response}")
return None
except Exception as e:
logger.error(f"Error parsing numerical mood: {e}, response: {response}")
return None
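For illustration, the fence-stripping and validation steps the parser applies, run on a fabricated response:

import json

raw = '```json\n{"joy": 7, "anger": 1, "sorrow": 2, "pleasure": 6, "fear": 1}\n```'
# Strip an optional markdown fence, exactly as the method does
if "```json" in raw:
    raw = raw.split("```json")[1].split("```")[0]
data = json.loads(raw)
# Validate the five required integer keys in [1, 10]
required = {"joy", "anger", "sorrow", "pleasure", "fear"}
assert required.issubset(data.keys())
assert all(isinstance(data[k], int) and 1 <= data[k] <= 10 for k in required)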
async def update_mood_by_message(self, message: MessageRecv):
self.regression_count = 0
message_time = message.message_info.time
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
chat_id=self.chat_id,
timestamp_start=self.last_change_time,
timestamp_end=message_time,
limit=15,
limit_mode="last",
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
prompt_personality = global_config.personality.personality_core
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
async def _update_text_mood():
prompt = await global_prompt_manager.format_prompt(
"change_mood_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
mood_state=self.mood_state,
)
logger.debug(f"text mood prompt: {prompt}")
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
logger.info(f"text mood response: {response}")
logger.debug(f"text mood reasoning_content: {reasoning_content}")
return response
async def _update_numerical_mood():
prompt = await global_prompt_manager.format_prompt(
"change_mood_numerical_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
mood_state=self.mood_state,
joy=self.mood_values["joy"],
anger=self.mood_values["anger"],
sorrow=self.mood_values["sorrow"],
pleasure=self.mood_values["pleasure"],
fear=self.mood_values["fear"],
)
logger.info(f"numerical mood prompt: {prompt}")
response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
prompt=prompt
)
logger.info(f"numerical mood response: {response}")
logger.debug(f"numerical mood reasoning_content: {reasoning_content}")
return self._parse_numerical_mood(response)
results = await asyncio.gather(_update_text_mood(), _update_numerical_mood())
text_mood_response, numerical_mood_response = results
if text_mood_response:
self.mood_state = text_mood_response
if numerical_mood_response:
self.mood_values = numerical_mood_response
if self.mood_values.get("joy", 0) > 5:
asyncio.create_task(send_joy_action(self.chat_id))
self.last_change_time = message_time
async def regress_mood(self):
message_time = time.time()
message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
chat_id=self.chat_id,
timestamp_start=self.last_change_time,
timestamp_end=message_time,
limit=15,
limit_mode="last",
)
chat_talking_prompt = build_readable_messages(
message_list_before_now,
replace_bot_name=True,
merge_messages=False,
timestamp_mode="normal_no_YMD",
read_mark=0.0,
truncate=True,
show_actions=True,
)
bot_name = global_config.bot.nickname
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
bot_nickname = ""
prompt_personality = global_config.personality.personality_core
indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
async def _regress_text_mood():
prompt = await global_prompt_manager.format_prompt(
"regress_mood_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
mood_state=self.mood_state,
)
logger.debug(f"text regress prompt: {prompt}")
response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
logger.info(f"text regress response: {response}")
logger.debug(f"text regress reasoning_content: {reasoning_content}")
return response
async def _regress_numerical_mood():
prompt = await global_prompt_manager.format_prompt(
"regress_mood_numerical_prompt",
chat_talking_prompt=chat_talking_prompt,
indentify_block=indentify_block,
mood_state=self.mood_state,
joy=self.mood_values["joy"],
anger=self.mood_values["anger"],
sorrow=self.mood_values["sorrow"],
pleasure=self.mood_values["pleasure"],
fear=self.mood_values["fear"],
)
logger.debug(f"numerical regress prompt: {prompt}")
response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
prompt=prompt
)
logger.info(f"numerical regress response: {response}")
logger.debug(f"numerical regress reasoning_content: {reasoning_content}")
return self._parse_numerical_mood(response)
results = await asyncio.gather(_regress_text_mood(), _regress_numerical_mood())
text_mood_response, numerical_mood_response = results
if text_mood_response:
self.mood_state = text_mood_response
if numerical_mood_response:
self.mood_values = numerical_mood_response
if self.mood_values.get("joy", 0) > 5:
asyncio.create_task(send_joy_action(self.chat_id))
self.regression_count += 1
class MoodRegressionTask(AsyncTask):
def __init__(self, mood_manager: "MoodManager"):
super().__init__(task_name="MoodRegressionTask", run_interval=30)
self.mood_manager = mood_manager
async def run(self):
logger.debug("Running mood regression task...")
now = time.time()
for mood in self.mood_manager.mood_list:
if mood.last_change_time == 0:
continue
if now - mood.last_change_time > 180:
if mood.regression_count >= 3:
continue
logger.info(f"chat {mood.chat_id} 开始情绪回归, 这是第 {mood.regression_count + 1}")
await mood.regress_mood()
class MoodManager:
def __init__(self):
self.mood_list: list[ChatMood] = []
"""当前情绪状态"""
self.task_started: bool = False
async def start(self):
"""启动情绪回归后台任务"""
if self.task_started:
return
logger.info("启动情绪回归任务...")
task = MoodRegressionTask(self)
await async_task_manager.add_task(task)
self.task_started = True
logger.info("情绪回归任务已启动")
def get_mood_by_chat_id(self, chat_id: str) -> ChatMood:
for mood in self.mood_list:
if mood.chat_id == chat_id:
return mood
new_mood = ChatMood(chat_id)
self.mood_list.append(new_mood)
return new_mood
def reset_mood_by_chat_id(self, chat_id: str):
for mood in self.mood_list:
if mood.chat_id == chat_id:
mood.mood_state = "感觉很平静"
mood.regression_count = 0
return
self.mood_list.append(ChatMood(chat_id))
init_prompt()
mood_manager = MoodManager()
"""全局情绪管理器"""


@@ -1,7 +1,18 @@
import asyncio
import math
from typing import Tuple
from src.chat.memory_system.Hippocampus import hippocampus_manager
from src.chat.message_receive.message import MessageRecv
from src.chat.message_receive.storage import MessageStorage
from src.chat.message_receive.chat_stream import get_chat_manager
from src.chat.utils.timer_calculator import Timer
from src.chat.utils.utils import is_mentioned_bot_in_message
from src.common.logger import get_logger
from src.config.config import global_config
from src.mais4u.mais4u_chat.body_emotion_action_manager import action_manager
from src.mais4u.mais4u_chat.s4u_mood_manager import mood_manager
from .s4u_chat import get_s4u_chat_manager
@@ -10,6 +21,42 @@ from .s4u_chat import get_s4u_chat_manager
logger = get_logger("chat")
async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
"""计算消息的兴趣度
Args:
message: 待处理的消息对象
Returns:
Tuple[float, bool]: (兴趣度, 是否被提及)
"""
is_mentioned, _ = is_mentioned_bot_in_message(message)
interested_rate = 0.0
if global_config.memory.enable_memory:
with Timer("记忆激活"):
interested_rate = await hippocampus_manager.get_activate_from_text(
message.processed_plain_text,
fast_retrieval=True,
)
logger.debug(f"记忆激活率: {interested_rate:.2f}")
text_len = len(message.processed_plain_text)
# Adjust interest by text length: longer text means higher interest with diminishing growth, min 0.01, max 0.05
# Use a logarithm for the diminishing growth
base_interest = 0.01 + (0.05 - 0.01) * (math.log10(text_len + 1) / math.log10(1000 + 1))
base_interest = min(max(base_interest, 0.01), 0.05)
interested_rate += base_interest
if is_mentioned:
interest_increase_on_mention = 1
interested_rate += interest_increase_on_mention
return interested_rate, is_mentioned
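To make the length bonus concrete: base_interest = 0.01 + 0.04 * log10(text_len + 1) / log10(1001), clamped to [0.01, 0.05], so a 100-character message contributes roughly 0.037. A standalone check of the same arithmetic:

import math

def length_bonus(text_len: int) -> float:
    base = 0.01 + (0.05 - 0.01) * (math.log10(text_len + 1) / math.log10(1000 + 1))
    return min(max(base, 0.01), 0.05)

assert length_bonus(0) == 0.01                  # log10(1) == 0, so the floor applies
assert abs(length_bonus(100) - 0.0367) < 0.001  # mid-range example
assert length_bonus(10**6) == 0.05              # clamped at the cap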
class S4UMessageProcessor:
"""心流处理器,负责处理接收到的消息并计算兴趣度"""
@@ -53,5 +100,13 @@ class S4UMessageProcessor:
else:
await s4u_chat.add_message(message)
interested_rate, _ = await _calculate_interest(message)
chat_mood = mood_manager.get_mood_by_chat_id(chat.stream_id)
asyncio.create_task(chat_mood.update_mood_by_message(message))
chat_action = action_manager.get_action_state_by_chat_id(chat.stream_id)
asyncio.create_task(chat_action.update_action_by_message(message))
# asyncio.create_task(chat_action.update_facial_expression_by_message(message, interested_rate))
# 7. Logging
logger.info(f"[S4U]{userinfo.user_nickname}:{message.processed_plain_text}")


@@ -17,11 +17,6 @@ logger = get_logger("prompt")
def init_prompt():
Prompt("你正在qq群里聊天下面是群里在聊的内容", "chat_target_group1")
Prompt("你正在和{sender_name}聊天,这是你们之前聊的内容:", "chat_target_private1")
Prompt("在群里聊天", "chat_target_group2")
Prompt("{sender_name}私聊", "chat_target_private2")
Prompt("\n你有以下这些**知识**\n{prompt_info}\n请你**记住上面的知识**,之后可能会用到。\n", "knowledge_prompt")
Prompt("\n关于你们的关系,你需要知道:\n{relation_info}\n", "relation_prompt")
Prompt("你回想起了一些事情:\n{memory_info}\n", "memory_prompt")
@@ -252,7 +247,7 @@ def weighted_sample_no_replacement(items, weights, k) -> list:
2. The same element is never selected twice
"""
selected = []
pool = list(zip(items, weights))
pool = list(zip(items, weights, strict=False))
for _ in range(min(k, len(pool))):
total = sum(w for _, w in pool)
r = random.uniform(0, total)


@@ -120,27 +120,38 @@ class RelationshipFetcher:
# Sort forgotten_points by time
current_points.sort(key=lambda x: x[2])
# Weighted random draw of 3 points (point[1] is 1-10; higher weight means higher selection probability)
# Weighted random draw of up to 3 distinct points (point[1] is 1-10; higher weight means higher selection probability)
if len(current_points) > 3:
# point[1] is in 1-10; use it directly as the weight
weights = [max(1, min(10, int(point[1]))) for point in current_points]
points = random.choices(current_points, weights=weights, k=3)
# Weighted sampling without replacement guarantees no duplicates
indices = list(range(len(current_points)))
points = []
for _ in range(3):
if not indices:
break
sub_weights = [weights[i] for i in indices]
chosen_idx = random.choices(indices, weights=sub_weights, k=1)[0]
points.append(current_points[chosen_idx])
indices.remove(chosen_idx)
else:
points = current_points
# Build the points text
points_text = "\n".join([f"{point[2]}{point[0]}" for point in points])
info_type = await self._build_fetch_query(person_id, target_message, chat_history)
if info_type:
await self._extract_single_info(person_id, info_type, person_name)
# info_type = await self._build_fetch_query(person_id, target_message, chat_history)
# if info_type:
# await self._extract_single_info(person_id, info_type, person_name)
relation_info = self._organize_known_info()
# relation_info = self._organize_known_info()
nickname_str = ""
if person_name != nickname_str:
nickname_str = f"(ta在{platform}上的昵称是{nickname_str})"
relation_info = ""
if short_impression and relation_info:
if points_text:
relation_info = f"你对{person_name}的印象是{nickname_str}{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事{points_text}"


@@ -68,7 +68,7 @@ class RelationshipManager:
short_impression = await person_info_manager.get_value(person_id, "short_impression")
current_points = await person_info_manager.get_value(person_id, "points") or []
print(f"current_points: {current_points}")
# print(f"current_points: {current_points}")
if isinstance(current_points, str):
try:
current_points = json.loads(current_points)
@@ -89,7 +89,7 @@ class RelationshipManager:
points = current_points
# Build the points text
points_text = "\n".join([f"{point[2]}{point[0]}\n" for point in points])
points_text = "\n".join([f"{point[2]}{point[0]}" for point in points])
nickname_str = await person_info_manager.get_value(person_id, "nickname")
platform = await person_info_manager.get_value(person_id, "platform")
@@ -360,19 +360,19 @@ class RelationshipManager:
# Adjust the max lengths of the impression and short impression by familiarity
if know_times > 300:
max_impression_length = 2000
max_short_impression_length = 800
max_short_impression_length = 400
elif know_times > 100:
max_impression_length = 1000
max_short_impression_length = 500
max_short_impression_length = 250
elif know_times > 50:
max_impression_length = 500
max_short_impression_length = 300
max_short_impression_length = 150
elif know_times > 10:
max_impression_length = 200
max_short_impression_length = 100
max_short_impression_length = 60
else:
max_impression_length = 100
max_short_impression_length = 50
max_short_impression_length = 30
# Adjust the max lengths of the impression and short impression by attitude
attitude_multiplier = (abs(100 - attitude) / 100) + 1


@@ -1,6 +1,5 @@
import random
import time
import json
from typing import Tuple
# Import the new plugin system
@@ -10,26 +9,22 @@ from src.plugin_system import BaseAction, ActionActivationType, ChatMode
from src.common.logger import get_logger
# Import API modules as standard Python packages
from src.plugin_system.apis import message_api, llm_api
from src.plugin_system.apis import message_api
from src.config.config import global_config
from json_repair import repair_json
logger = get_logger("core_actions")
class NoReplyAction(BaseAction):
"""不回复动作,使用智能判断机制决定何时结束等待
"""不回复动作,根据新消息的兴趣值或数量决定何时结束等待.
新的等待逻辑
- 每0.2秒检查是否有新消息(提高响应性)
- 如果累计消息数量达到阈值默认20条直接结束等待
- 有新消息时进行LLM判断但最快1秒一次防止过于频繁
- 如果判断需要回复,则结束等待;否则继续等待
- 达到最大超时时间后强制结束
新的等待逻辑:
1. 新消息累计兴趣值超过阈值 (默认10) 则结束等待
2. 累计消息数量达到随机阈值 (默认5-10条) 则结束等待
"""
focus_activation_type = ActionActivationType.ALWAYS
# focus_activation_type = ActionActivationType.RANDOM
normal_activation_type = ActionActivationType.NEVER
mode_enable = ChatMode.FOCUS
parallel_action = False
@@ -41,21 +36,11 @@ class NoReplyAction(BaseAction):
# Consecutive no_reply counter
_consecutive_count = 0
# Minimum interval between LLM judgements
_min_judge_interval = 1.0 # at most one LLM judgement per second
# Message-count threshold for automatic exit
_auto_exit_message_count = 20 # stop automatically after 20 accumulated messages
# Maximum wait timeout
_max_timeout = 600 # seconds
# Config for skipping the LLM judgement
_skip_judge_when_tired = True
_skip_probability = 0.5
# New: config for exiting focus mode based on reply frequency
_frequency_check_window = 600 # frequency-check window (seconds)
# New: interest-value exit threshold
_interest_exit_threshold = 3.0
# New: message-count exit thresholds
_min_exit_message_count = 5
_max_exit_message_count = 10
# Action parameter definitions
action_parameters = {"reason": "不回复的原因"}
@@ -67,7 +52,7 @@ class NoReplyAction(BaseAction):
associated_types = []
async def execute(self) -> Tuple[bool, str]:
"""执行不回复动作有新消息时进行判断但最快1秒一次"""
"""执行不回复动作"""
import asyncio
try:
@@ -77,30 +62,14 @@ class NoReplyAction(BaseAction):
reason = self.action_data.get("reason", "")
start_time = time.time()
last_judge_time = start_time # time of the last LLM judgement
min_judge_interval = self._min_judge_interval # minimum judgement interval, from config
check_interval = 0.2 # check for new messages every 0.2s for responsiveness
check_interval = 1.0 # check once per second
# Accumulated judgement history
judge_history = [] # stores each judgement's result and reason
# Fetch the 10 context messages from when no_reply began, for later records
context_messages = message_api.get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=start_time - 600, # 获取开始前10分钟内的消息
end_time=start_time,
limit=10,
limit_mode="latest",
# Randomly generate the new-message count threshold for this wait
exit_message_count_threshold = random.randint(self._min_exit_message_count, self._max_exit_message_count)
logger.info(
f"{self.log_prefix} 本次no_reply需要 {exit_message_count_threshold} 条新消息或累计兴趣值超过 {self._interest_exit_threshold} 才能打断"
)
# Build the context string
context_str = ""
if context_messages:
context_str = message_api.build_readable_messages(
messages=context_messages, timestamp_mode="normal_no_YMD", truncate=False, show_actions=True
)
context_str = f"当时选择no_reply前的聊天上下文\n{context_str}\n"
logger.info(f"{self.log_prefix} 选择不回复(第{count}次),开始摸鱼,原因: {reason}")
# Enter the waiting state
@@ -108,196 +77,64 @@ class NoReplyAction(BaseAction):
current_time = time.time()
elapsed_time = current_time - start_time
if global_config.chat.chat_mode == "auto" and self.is_group:
# Check for timeout
if elapsed_time >= self._max_timeout or self._check_no_activity_and_exit_focus(current_time):
logger.info(
f"{self.log_prefix} 等待时间过久({self._max_timeout}或过去10分钟完全没有发言退出专注模式"
)
# Mark exit from focus mode
self.action_data["_system_command"] = "stop_focus_chat"
exit_reason = f"{global_config.bot.nickname}(你)等待了{self._max_timeout}秒,或完全没有说话,感觉群里没有新内容,决定退出专注模式,稍作休息"
await self.store_action_info(
action_build_into_prompt=True,
action_prompt_display=exit_reason,
action_done=True,
)
return True, exit_reason
# Check for new messages
new_message_count = message_api.count_new_messages(
# 1. Check new messages
recent_messages_dict = message_api.get_messages_by_time_in_chat(
chat_id=self.chat_id, start_time=start_time, end_time=current_time
)
new_message_count = len(recent_messages_dict)
# If the accumulated message count reaches the threshold, stop waiting immediately
if new_message_count >= self._auto_exit_message_count:
logger.info(f"{self.log_prefix} 累计消息数量达到{new_message_count}条,直接结束等待")
# 2. Check whether the message count reached the threshold
if new_message_count >= exit_message_count_threshold:
logger.info(
f"{self.log_prefix} 累计消息数量达到{new_message_count}条(>{exit_message_count_threshold}),结束等待"
)
exit_reason = f"{global_config.bot.nickname}(你)看到了{new_message_count}条新消息,可以考虑一下是否要进行回复"
await self.store_action_info(
action_build_into_prompt=True,
action_build_into_prompt=False,
action_prompt_display=exit_reason,
action_done=True,
)
return True, f"累计消息数量达到{new_message_count}条,直接结束等待 (等待时间: {elapsed_time:.1f}秒)"
return True, f"累计消息数量达到{new_message_count}条,结束等待 (等待时间: {elapsed_time:.1f}秒)"
# Trigger condition: 3+ accumulated messages, or over 5s waited with new messages
time_since_last_judge = current_time - last_judge_time
should_judge, trigger_reason = self._should_trigger_judge(new_message_count, time_since_last_judge)
if should_judge and time_since_last_judge >= min_judge_interval:
logger.info(f"{self.log_prefix} 触发判定({trigger_reason}),进行智能判断...")
# Fetch recent message content for the judgement
recent_messages = message_api.get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=start_time,
end_time=current_time,
)
if recent_messages:
# Build a readable message string via message_api
messages_text = message_api.build_readable_messages(
messages=recent_messages,
timestamp_mode="normal_no_YMD",
truncate=False,
show_actions=False,
# 3. Check the accumulated interest value
if new_message_count > 0:
accumulated_interest = 0.0
for msg_dict in recent_messages_dict:
text = msg_dict.get("processed_plain_text", "")
interest_value = msg_dict.get("interest_value", 0.0)
if text:
accumulated_interest += interest_value
logger.info(f"{self.log_prefix} 当前累计兴趣值: {accumulated_interest:.2f}")
if accumulated_interest >= self._interest_exit_threshold:
logger.info(
f"{self.log_prefix} 累计兴趣值达到{accumulated_interest:.2f}(>{self._interest_exit_threshold}),结束等待"
)
exit_reason = f"{global_config.bot.nickname}(你)感觉到了大家浓厚的兴趣(兴趣值{accumulated_interest:.1f}),决定重新加入讨论"
await self.store_action_info(
action_build_into_prompt=False,
action_prompt_display=exit_reason,
action_done=True,
)
return (
True,
f"累计兴趣值达到{accumulated_interest:.2f},结束等待 (等待时间: {elapsed_time:.1f}秒)",
)
# Fetch identity info
bot_name = global_config.bot.nickname
bot_nickname = ""
if global_config.bot.alias_names:
bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
bot_core_personality = global_config.personality.personality_core
identity_block = f"你的名字是{bot_name}{bot_nickname},你{bot_core_personality}"
# Build the judgement-history string (show at most 3 entries)
history_block = ""
if judge_history:
history_block = "之前的判断历史:\n"
# Keep only the 3 most recent history entries
recent_history = judge_history[-3:] if len(judge_history) > 3 else judge_history
for i, (timestamp, judge_result, reason) in enumerate(recent_history, 1):
elapsed_seconds = int(timestamp - start_time)
history_block += f"{i}. 等待{elapsed_seconds}秒时判断:{judge_result},理由:{reason}\n"
history_block += "\n"
# Check the bot's speaking frequency over the past 10 minutes
frequency_block, should_skip_llm_judge = self._get_fatigue_status(current_time)
# If skipping the LLM judgement, just update the time and keep waiting
if should_skip_llm_judge:
logger.info(f"{self.log_prefix} 疲劳,继续等待。")
last_judge_time = time.time() # update the judgement time to avoid immediately re-judging
start_time = current_time # advance the message-check start time to avoid duplicate judgements
continue # skip this LLM judgement and keep waiting in the loop
# Build the judgement context
chat_context = "QQ群" if self.is_group else "私聊"
judge_prompt = f"""
{identity_block}
你现在正在{chat_context}参与聊天,以下是聊天内容:
{context_str}
在以上的聊天中,你选择了暂时不回复,现在,你看到了新的聊天消息如下:
{messages_text}
{history_block}
请注意:{frequency_block}
请你判断,是否要结束不回复的状态,重新加入聊天讨论。
判断标准:
1. 如果有人直接@你、提到你的名字或明确向你询问,应该回复
2. 如果话题发生重要变化,需要你参与讨论,应该回复
3. 如果只是普通闲聊、重复内容或与你无关的讨论,不需要回复
4. 如果消息内容过于简单(如单纯的表情、"哈哈"等),不需要回复
5. 参考之前的判断历史,如果情况有明显变化或持续等待时间过长,考虑调整判断
请用JSON格式回复你的判断严格按照以下格式
{{
"should_reply": true/false,
"reason": "详细说明你的判断理由"
}}
"""
try:
# Fetch the available model configs
available_models = llm_api.get_available_models()
# Use the utils_small model
small_model = getattr(available_models, "utils_small", None)
logger.debug(judge_prompt)
if small_model:
# Judge with the small model
success, response, reasoning, model_name = await llm_api.generate_with_model(
prompt=judge_prompt,
model_config=small_model,
request_type="plugin.no_reply_judge",
temperature=0.7, # further lowered temperature for more consistent and accurate JSON output
)
# Update the last judgement time
last_judge_time = time.time()
if success and response:
response = response.strip()
logger.debug(f"{self.log_prefix} 模型({model_name})原始JSON响应: {response}")
# Parse the LLM's JSON response to extract the judgement and reason
judge_result, reason = self._parse_llm_judge_response(response)
if judge_result:
logger.info(f"{self.log_prefix} 决定继续参与讨论,结束等待,原因: {reason}")
else:
logger.info(f"{self.log_prefix} 决定不参与讨论,继续等待,原因: {reason}")
# Save the judgement result to the history
judge_history.append((current_time, judge_result, reason))
if judge_result == "需要回复":
# logger.info(f"{self.log_prefix} 模型判断需要回复,结束等待")
full_prompt = f"{global_config.bot.nickname}(你)的想法是:{reason}"
await self.store_action_info(
action_build_into_prompt=True,
action_prompt_display=full_prompt,
action_done=True,
)
return True, f"检测到需要回复的消息,结束等待 (等待时间: {elapsed_time:.1f}秒)"
else:
logger.info(f"{self.log_prefix} 模型判断不需要回复,理由: {reason},继续等待")
# Advance the start time to avoid re-judging the same messages
start_time = current_time
else:
logger.warning(f"{self.log_prefix} 模型判断失败,继续等待")
else:
logger.warning(f"{self.log_prefix} 未找到可用的模型配置,继续等待")
last_judge_time = time.time() # update the time even on failure, to avoid rapid retries
except Exception as e:
logger.error(f"{self.log_prefix} 模型判断异常: {e},继续等待")
last_judge_time = time.time() # update the time on exceptions too, to avoid rapid retries
# Log the waiting status every 10 seconds
if elapsed_time < 60:
if int(elapsed_time) % 10 == 0 and int(elapsed_time) > 0:
logger.debug(f"{self.log_prefix} 已等待{elapsed_time:.0f}秒,等待新消息...")
await asyncio.sleep(1)
else:
if int(elapsed_time) % 180 == 0 and int(elapsed_time) > 0:
logger.info(f"{self.log_prefix} 已等待{elapsed_time / 60:.0f}分钟,等待新消息...")
await asyncio.sleep(1)
if int(elapsed_time) > 0 and int(elapsed_time) % 10 == 0:
logger.debug(
f"{self.log_prefix} 已等待{elapsed_time:.0f}秒,累计{new_message_count}条消息,继续等待..."
)
# asyncio.sleep(1) avoids logging repeatedly within the same second
await asyncio.sleep(1)
# Brief pause before the next check
await asyncio.sleep(check_interval)
except Exception as e:
logger.error(f"{self.log_prefix} 不回复动作执行失败: {e}")
# Record the action even on failure
exit_reason = f"执行异常: {str(e)}"
full_prompt = f"{context_str}{exit_reason},你思考是否要进行回复"
full_prompt = f"no_reply执行异常: {exit_reason},你思考是否要进行回复"
await self.store_action_info(
action_build_into_prompt=True,
action_prompt_display=full_prompt,
@@ -305,215 +142,6 @@ class NoReplyAction(BaseAction):
)
return False, f"不回复动作执行失败: {e}"
def _should_trigger_judge(self, new_message_count: int, time_since_last_judge: float) -> Tuple[bool, str]:
"""判断是否应该触发智能判断,并返回触发原因。
Args:
new_message_count: 新消息的数量。
time_since_last_judge: 距离上次判断的时间。
Returns:
一个元组 (should_judge, reason)。
- should_judge: 一个布尔值,指示是否应该触发判断。
- reason: 触发判断的原因字符串。
"""
# 判定条件累计3条消息或等待超过15秒且有新消息
should_judge_flag = new_message_count >= 3 or (new_message_count > 0 and time_since_last_judge >= 15.0)
if not should_judge_flag:
return False, ""
# Determine the trigger reason
if new_message_count >= 3:
return True, f"累计{new_message_count}条消息"
elif new_message_count > 0 and time_since_last_judge >= 15.0:
return True, f"等待{time_since_last_judge:.1f}秒且有新消息"
return False, ""
def _get_fatigue_status(self, current_time: float) -> Tuple[str, bool]:
"""
Generate a fatigue hint from the recent speaking frequency, and decide whether to skip the judgement.
Args:
current_time: the current timestamp.
Returns:
A tuple (frequency_block, should_skip_judge).
- frequency_block: the fatigue-related hint string.
- should_skip_judge: a boolean for whether to skip the LLM judgement.
"""
try:
# Fetch all messages from the past 10 minutes
past_10min_time = current_time - 600 # 10 minutes ago
all_messages_10min = message_api.get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=past_10min_time,
end_time=current_time,
)
# Manually filter out the bot's own messages
bot_message_count = 0
if all_messages_10min:
user_id = global_config.bot.qq_account
for message in all_messages_10min:
sender_id = message.get("user_id", "")
if sender_id == user_id:
bot_message_count += 1
talk_frequency_threshold = global_config.chat.get_current_talk_frequency(self.chat_id) * 10
if bot_message_count > talk_frequency_threshold:
over_count = bot_message_count - talk_frequency_threshold
skip_probability = 0
frequency_block = ""
if over_count <= 3:
frequency_block = "你感觉稍微有些累,回复的有点多了。\n"
elif over_count <= 5:
frequency_block = "你今天说话比较多,感觉有点疲惫,想要稍微休息一下。\n"
elif over_count <= 8:
frequency_block = "你发现自己说话太多了,感觉很累,想要安静一会儿,除非有重要的事情否则不想回复。\n"
skip_probability = self._skip_probability
else:
frequency_block = "你感觉非常累,想要安静一会儿。\n"
skip_probability = 1
should_skip_judge = self._skip_judge_when_tired and random.random() < skip_probability
if should_skip_judge:
logger.info(
f"{self.log_prefix} 发言过多(超过{over_count}条)随机决定跳过此次LLM判断(概率{skip_probability * 100:.0f}%)"
)
logger.info(
f"{self.log_prefix} 过去10分钟发言{bot_message_count}条,超过阈值{talk_frequency_threshold},添加疲惫提示"
)
return frequency_block, should_skip_judge
else:
# Positive hint when the bot has replied little
under_count = talk_frequency_threshold - bot_message_count
frequency_block = ""
if under_count >= talk_frequency_threshold * 0.8:
frequency_block = "你感觉精力充沛,状态很好,积极参与聊天。\n"
elif under_count >= talk_frequency_threshold * 0.5:
frequency_block = "你感觉状态不错。\n"
logger.info(
f"{self.log_prefix} 过去10分钟发言{bot_message_count}条,未超过阈值{talk_frequency_threshold},添加正向提示"
)
return frequency_block, False
except Exception as e:
logger.warning(f"{self.log_prefix} 检查发言频率时出错: {e}")
return "", False
def _check_no_activity_and_exit_focus(self, current_time: float) -> bool:
"""检查过去10分钟是否完全没有发言决定是否退出专注模式
Args:
current_time: 当前时间戳
Returns:
bool: 是否应该退出专注模式
"""
try:
# Only check in auto mode
if global_config.chat.chat_mode != "auto":
return False
# Fetch all messages from the past 10 minutes
past_10min_time = current_time - 600 # 10 minutes ago
all_messages = message_api.get_messages_by_time_in_chat(
chat_id=self.chat_id,
start_time=past_10min_time,
end_time=current_time,
)
if not all_messages:
# With no messages at all, there is no need to exit focus mode either
return False
# Count the bot's own replies
bot_message_count = 0
user_id = global_config.bot.qq_account
for message in all_messages:
sender_id = message.get("user_id", "")
if sender_id == user_id:
bot_message_count += 1
# If the bot sent no messages at all in the past 10 minutes, exit focus mode
if bot_message_count == 0:
logger.info(f"{self.log_prefix} 过去10分钟bot完全没有发言准备退出专注模式")
return True
else:
logger.debug(f"{self.log_prefix} 过去10分钟bot发言{bot_message_count}条,继续保持专注模式")
return False
except Exception as e:
logger.error(f"{self.log_prefix} 检查无活动状态时出错: {e}")
return False
def _parse_llm_judge_response(self, response: str) -> tuple[str, str]:
"""解析LLM判断响应使用JSON格式提取判断结果和理由
Args:
response: LLM的原始JSON响应
Returns:
tuple: (判断结果, 理由)
"""
try:
# Use repair_json to fix potentially malformed JSON
fixed_json_string = repair_json(response)
logger.debug(f"{self.log_prefix} repair_json修复后的响应: {fixed_json_string}")
# If repair_json returned a string, parse it into a Python object
if isinstance(fixed_json_string, str):
result_json = json.loads(fixed_json_string)
else:
# If repair_json returned a dict directly, use it as-is
result_json = fixed_json_string
# Extract the judgement and reason from the JSON
should_reply = result_json.get("should_reply", False)
reason = result_json.get("reason", "无法获取判断理由")
# Convert the boolean into a Chinese label string
judge_result = "需要回复" if should_reply else "不需要回复"
logger.debug(f"{self.log_prefix} JSON解析成功 - 判断: {judge_result}, 理由: {reason}")
return judge_result, reason
except (json.JSONDecodeError, KeyError, TypeError) as e:
logger.warning(f"{self.log_prefix} JSON解析失败尝试文本解析: {e}")
# If JSON parsing fails, fall back to simple keyword matching
try:
response_lower = response.lower()
if "true" in response_lower or "需要回复" in response:
judge_result = "需要回复"
reason = "从响应文本中检测到需要回复的指示"
elif "false" in response_lower or "不需要回复" in response:
judge_result = "不需要回复"
reason = "从响应文本中检测到不需要回复的指示"
else:
judge_result = "不需要回复" # 默认值
reason = f"无法解析响应格式,使用默认判断。原始响应: {response[:100]}..."
logger.debug(f"{self.log_prefix} 文本解析结果 - 判断: {judge_result}, 理由: {reason}")
return judge_result, reason
except Exception as fallback_e:
logger.error(f"{self.log_prefix} 文本解析也失败: {fallback_e}")
return "不需要回复", f"解析异常: {str(e)}, 回退解析也失败: {str(fallback_e)}"
except Exception as e:
logger.error(f"{self.log_prefix} 解析LLM响应时出错: {e}")
return "不需要回复", f"解析异常: {str(e)}"
@classmethod
def reset_consecutive_count(cls):
"""重置连续计数器"""


@@ -9,6 +9,7 @@ import random
import time
from typing import List, Tuple, Type
import asyncio
import re
# Import the new plugin system
from src.plugin_system import BasePlugin, register_plugin, BaseAction, ComponentInfo, ActionActivationType, ChatMode
@@ -54,12 +55,26 @@ class ReplyAction(BaseAction):
# Associated types
associated_types = ["text"]
def _parse_reply_target(self, target_message: str) -> tuple:
sender = ""
target = ""
if ":" in target_message or "" in target_message:
# 使用正则表达式匹配中文或英文冒号
parts = re.split(pattern=r"[:]", string=target_message, maxsplit=1)
if len(parts) == 2:
sender = parts[0].strip()
target = parts[1].strip()
return sender, target
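The helper splits a reply_to string of the form "sender: quoted text" on the first full-width or ASCII colon only, so colons inside the quoted text survive; a standalone re-creation:

import re

def parse_reply_target(target_message: str) -> tuple[str, str]:
    sender, target = "", ""
    if ":" in target_message or ":" in target_message:
        # maxsplit=1 keeps any later colons inside the target text
        parts = re.split(pattern=r"[::]", string=target_message, maxsplit=1)
        if len(parts) == 2:
            sender, target = parts[0].strip(), parts[1].strip()
    return sender, target

assert parse_reply_target("Alice:下午3:00见") == ("Alice", "下午3:00见")
assert parse_reply_target("no colon here") == ("", "")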
async def execute(self) -> Tuple[bool, str]:
"""执行回复动作"""
logger.info(f"{self.log_prefix} 决定进行回复")
start_time = self.action_data.get("loop_start_time", time.time())
reply_to = self.action_data.get("reply_to", "")
sender, target = self._parse_reply_target(reply_to)
try:
try:
success, reply_set = await asyncio.wait_for(
@@ -105,6 +120,11 @@ class ReplyAction(BaseAction):
reply_text += data
# Store the action record
if sender and target:
reply_text = f"你对{sender}说的{target},进行了回复:{reply_text}"
else:
reply_text = f"你进行发言:{reply_text}"
await self.store_action_info(
action_build_into_prompt=False,
action_prompt_display=reply_text,
@@ -144,37 +164,18 @@ class CoreActionsPlugin(BasePlugin):
config_section_descriptions = {
"plugin": "插件启用配置",
"components": "核心组件启用配置",
"no_reply": "不回复动作配置(智能等待机制)",
}
# Config schema definition
config_schema = {
"plugin": {
"enabled": ConfigField(type=bool, default=True, description="是否启用插件"),
"config_version": ConfigField(type=str, default="0.3.1", description="配置文件版本"),
"enabled": ConfigField(type=bool, default=False, description="是否启用插件"),
"config_version": ConfigField(type=str, default="0.4.0", description="配置文件版本"),
},
"components": {
"enable_reply": ConfigField(type=bool, default=True, description="是否启用'回复'动作"),
"enable_no_reply": ConfigField(type=bool, default=True, description="是否启用'不回复'动作"),
"enable_emoji": ConfigField(type=bool, default=True, description="是否启用'表情'动作"),
},
"no_reply": {
"max_timeout": ConfigField(type=int, default=1200, description="最大等待超时时间(秒)"),
"min_judge_interval": ConfigField(
type=float, default=1.0, description="LLM判断的最小间隔时间防止过于频繁"
),
"auto_exit_message_count": ConfigField(
type=int, default=20, description="累计消息数量达到此阈值时自动结束等待"
),
"random_probability": ConfigField(
type=float, default=0.8, description="Focus模式下随机选择不回复的概率0.0到1.0", example=0.8
),
"skip_judge_when_tired": ConfigField(
type=bool, default=True, description="当发言过多时是否启用跳过LLM判断机制"
),
"frequency_check_window": ConfigField(
type=int, default=600, description="回复频率检查窗口时间(秒)", example=600
),
"enable_reply": ConfigField(type=bool, default=True, description="是否启用回复动作"),
"enable_no_reply": ConfigField(type=bool, default=True, description="是否启用不回复动作"),
"enable_emoji": ConfigField(type=bool, default=True, description="是否启用发送表情/图片动作"),
},
}
@@ -192,25 +193,6 @@ class CoreActionsPlugin(BasePlugin):
EmojiAction.focus_activation_type = ActionActivationType.LLM_JUDGE
EmojiAction.normal_activation_type = ActionActivationType.LLM_JUDGE
no_reply_probability = self.get_config("no_reply.random_probability", 0.8)
NoReplyAction.random_activation_probability = no_reply_probability
min_judge_interval = self.get_config("no_reply.min_judge_interval", 1.0)
NoReplyAction._min_judge_interval = min_judge_interval
auto_exit_message_count = self.get_config("no_reply.auto_exit_message_count", 20)
NoReplyAction._auto_exit_message_count = auto_exit_message_count
max_timeout = self.get_config("no_reply.max_timeout", 600)
NoReplyAction._max_timeout = max_timeout
skip_judge_when_tired = self.get_config("no_reply.skip_judge_when_tired", True)
NoReplyAction._skip_judge_when_tired = skip_judge_when_tired
# New: frequency-detection config
frequency_check_window = self.get_config("no_reply.frequency_check_window", 600)
NoReplyAction._frequency_check_window = frequency_check_window
# --- Register components according to config ---
components = []
if self.get_config("components.enable_reply", True):


@@ -54,7 +54,7 @@ class SearchKnowledgeTool(BaseTool):
@staticmethod
def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
"""计算两个向量之间的余弦相似度"""
dot_product = sum(p * q for p, q in zip(vec1, vec2))
dot_product = sum(p * q for p, q in zip(vec1, vec2, strict=False))
magnitude1 = math.sqrt(sum(p * p for p in vec1))
magnitude2 = math.sqrt(sum(q * q for q in vec2))
if magnitude1 == 0 or magnitude2 == 0:

uv.lock (generated, new file, +2656 lines): file diff suppressed because it is too large