Migration of 1f91967 (remove: drop the willing system, drop reply2, drop energy values, replace reply_to with message)

Windpicker-owo
2025-09-03 21:27:28 +08:00
parent a63ca537d1
commit 53e72ecbdb
22 changed files with 151 additions and 947 deletions

View File

@@ -1,6 +1,8 @@
 import asyncio
 import time
 import traceback
+import math
+import random
 from typing import Optional, Dict, Any, Tuple
 from src.chat.message_receive.chat_stream import get_chat_manager
@@ -10,7 +12,7 @@ from src.config.config import global_config
 from src.chat.planner_actions.planner import ActionPlanner
 from src.chat.planner_actions.action_modifier import ActionModifier
 from src.person_info.person_info import get_person_info_manager
-from src.plugin_system.apis import database_api
+from src.plugin_system.apis import database_api, generator_api
 from src.plugin_system.base.component_types import ChatMode
 from src.mais4u.constant_s4u import ENABLE_S4U
 from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
@@ -44,7 +46,6 @@ class CycleProcessor:
     async def _send_and_store_reply(
         self,
         response_set,
-        reply_to_str,
         loop_start_time,
         action_message,
         cycle_timers: Dict[str, float],
@@ -52,7 +53,7 @@ class CycleProcessor:
         actions,
     ) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
         with Timer("回复发送", cycle_timers):
-            reply_text = await self.response_handler.send_response(response_set, reply_to_str, loop_start_time, action_message)
+            reply_text = await self.response_handler.send_response(response_set, loop_start_time, action_message)

         # 存储reply action信息
         person_info_manager = get_person_info_manager()
@@ -60,7 +61,7 @@ class CycleProcessor:
         # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
         platform = action_message.get("chat_info_platform")
         if platform is None:
-            platform = getattr(self.chat_stream, "platform", "unknown")
+            platform = getattr(self.context.chat_stream, "platform", "unknown")

         person_id = person_info_manager.get_person_id(
             platform,
@@ -75,7 +76,7 @@ class CycleProcessor:
             action_prompt_display=action_prompt_display,
             action_done=True,
             thinking_id=thinking_id,
-            action_data={"reply_text": reply_text, "reply_to": reply_to_str},
+            action_data={"reply_text": reply_text},
             action_name="reply",
         )
@@ -113,12 +114,6 @@ class CycleProcessor:
         """
         action_type = "no_action"
         reply_text = ""  # 初始化reply_text变量,避免UnboundLocalError
-        reply_to_str = ""  # 初始化reply_to_str变量
-
-        # 根据interest_value计算概率,决定使用哪种planner模式
-        # interest_value越高,越倾向于使用Normal模式
-        import random
-        import math

         # 使用sigmoid函数将interest_value转换为概率
         # 当interest_value为0时,概率接近0(使用Focus模式)
@@ -224,40 +219,23 @@ class CycleProcessor:
                     "command": command
                 }
             else:
-                # 执行回复动作
                 try:
-                    reply_to_str = await self._build_reply_to_str(action_info["action_message"])
-                except UserWarning:
-                    logger.warning("选取了自己作为回复对象,跳过回复生成")
-                    return {
-                        "action_type": "reply",
-                        "success": False,
-                        "reply_text": "",
-                        "loop_info": None
-                    }
-
-                # 生成回复
-                gather_timeout = global_config.chat.thinking_timeout
-                try:
-                    response_set = await asyncio.wait_for(
-                        self.response_handler.generate_response(
-                            message_data=action_info["action_message"],
-                            available_actions=action_info["available_actions"],
-                            reply_to=reply_to_str,
-                            request_type="chat.replyer",
-                        ),
-                        timeout=gather_timeout
-                    )
-                except asyncio.TimeoutError:
-                    logger.warning(
-                        f"{self.log_prefix} 并行执行:回复生成超时>{global_config.chat.thinking_timeout}s,已跳过"
-                    )
-                    return {
-                        "action_type": "reply",
-                        "success": False,
-                        "reply_text": "",
-                        "loop_info": None
-                    }
+                    success, response_set, _ = await generator_api.generate_reply(
+                        chat_stream=self.context.chat_stream,
+                        reply_message=action_info["action_message"],
+                        available_actions=available_actions,
+                        enable_tool=global_config.tool.enable_tool,
+                        request_type="chat.replyer",
+                        from_plugin=False,
+                    )
+                    if not success or not response_set:
+                        logger.info(f"{action_info['action_message'].get('processed_plain_text')} 的回复生成失败")
+                        return {
+                            "action_type": "reply",
+                            "success": False,
+                            "reply_text": "",
+                            "loop_info": None
+                        }
                 except asyncio.CancelledError:
                     logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")
                     return {
@@ -267,18 +245,8 @@ class CycleProcessor:
                         "loop_info": None
                     }

-                if not response_set:
-                    logger.warning(f"{self.log_prefix} 模型超时或生成回复内容为空")
-                    return {
-                        "action_type": "reply",
-                        "success": False,
-                        "reply_text": "",
-                        "loop_info": None
-                    }
-
                 loop_info, reply_text, cycle_timers_reply = await self._send_and_store_reply(
                     response_set,
-                    reply_to_str,
                     loop_start_time,
                     action_info["action_message"],
                     cycle_timers,
@@ -303,7 +271,6 @@ class CycleProcessor:
             }

         # 创建所有动作的后台任务
         action_tasks = [asyncio.create_task(execute_action(action)) for action in actions]
-
         # 并行执行所有任务
@@ -367,7 +334,6 @@ class CycleProcessor:
         self.context.chat_instance.cycle_tracker.end_cycle(loop_info, cycle_timers)
         self.context.chat_instance.cycle_tracker.print_cycle_info(cycle_timers)

-        # await self.willing_manager.after_generate_reply_handle(message_data.get("message_id", ""))
         action_type = actions[0]["action_type"] if actions else "no_action"

         # 管理no_reply计数器:当执行了非no_reply动作时重置计数器
         if action_type != "no_reply":
@@ -688,36 +654,6 @@ class CycleProcessor:
             "action_prompt": "",
         }

-    async def _build_reply_to_str(self, message_data: dict):
-        """
-        构建回复目标字符串
-
-        Args:
-            message_data: 消息数据字典
-
-        Returns:
-            str: 格式化的回复目标字符串,格式为"用户名:消息内容"
-
-        功能说明:
-        - 从消息数据中提取平台和用户ID信息
-        - 通过人员信息管理器获取用户昵称
-        - 构建用于回复显示的格式化字符串
-        """
-        from src.person_info.person_info import get_person_info_manager
-
-        person_info_manager = get_person_info_manager()
-        platform = (
-            message_data.get("chat_info_platform")
-            or message_data.get("user_platform")
-            or (self.context.chat_stream.platform if self.context.chat_stream else "default")
-        )
-        user_id = message_data.get("user_id", "")
-        if user_id == str(global_config.bot.qq_account) and platform == global_config.bot.platform:
-            raise UserWarning
-        person_id = person_info_manager.get_person_id(platform, user_id)
-        person_name = await person_info_manager.get_value(person_id, "person_name")
-        return f"{person_name}:{message_data.get('processed_plain_text')}"
-
     def _build_final_loop_info(self, reply_loop_info, action_success, action_reply_text, action_command, plan_result):
         """
         构建最终的循环信息
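Net effect in this file: the planner's reply action now hands the triggering message dict straight to generator_api.generate_reply, and all of the reply_to_str plumbing (_build_reply_to_str, the UserWarning self-reply guard, the explicit asyncio.wait_for timeout) is gone. A minimal sketch of the new call path, assuming only the keyword signature visible in the hunks above:

    success, response_set, _ = await generator_api.generate_reply(
        chat_stream=self.context.chat_stream,
        reply_message=action_info["action_message"],  # message dict replaces the "name:text" reply_to string
        available_actions=available_actions,
        enable_tool=global_config.tool.enable_tool,
        request_type="chat.replyer",
        from_plugin=False,
    )
    if not success or not response_set:
        pass  # give up on this reply; failure is reported via `success`, no separate TimeoutError branch here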

View File

@@ -57,26 +57,6 @@ class EnergyManager:
         await asyncio.sleep(0)
         logger.info(f"{self.context.log_prefix} 能量管理器已停止")

-    def _handle_energy_completion(self, task: asyncio.Task):
-        """
-        处理能量循环任务完成
-
-        Args:
-            task: 完成的异步任务对象
-
-        功能说明:
-        - 处理任务正常完成或异常情况
-        - 记录相应的日志信息
-        - 区分取消和异常终止的情况
-        """
-        try:
-            if exception := task.exception():
-                logger.error(f"{self.context.log_prefix} 能量循环异常: {exception}")
-            else:
-                logger.info(f"{self.context.log_prefix} 能量循环正常结束")
-        except asyncio.CancelledError:
-            logger.info(f"{self.context.log_prefix} 能量循环被取消")
-
     async def _energy_loop(self):
         """
         能量与睡眠压力管理的主循环

View File

@@ -12,14 +12,12 @@ from src.chat.express.expression_learner import expression_learner_manager
 from src.plugin_system.base.component_types import ChatMode
 from src.schedule.schedule_manager import schedule_manager, SleepState
 from src.plugin_system.apis import message_api
-from src.chat.willing.willing_manager import get_willing_manager
 from .hfc_context import HfcContext
 from .energy_manager import EnergyManager
 from .proactive_thinker import ProactiveThinker
 from .cycle_processor import CycleProcessor
 from .response_handler import ResponseHandler
-from .normal_mode_handler import NormalModeHandler
 from .cycle_tracker import CycleTracker
 from .wakeup_manager import WakeUpManager
@@ -47,7 +45,6 @@ class HeartFChatting:
         self.cycle_processor = CycleProcessor(self.context, self.response_handler, self.cycle_tracker)
         self.energy_manager = EnergyManager(self.context)
         self.proactive_thinker = ProactiveThinker(self.context, self.cycle_processor)
-        self.normal_mode_handler = NormalModeHandler(self.context, self.cycle_processor)
         self.wakeup_manager = WakeUpManager(self.context)

         # 将唤醒度管理器设置到上下文中
@@ -60,7 +57,6 @@ class HeartFChatting:
         # 记录最近3次的兴趣度
         self.recent_interest_records: deque = deque(maxlen=3)

-        self.willing_manager = get_willing_manager()
         self._initialize_chat_mode()
         logger.info(f"{self.context.log_prefix} HeartFChatting 初始化完成")
@@ -97,7 +93,7 @@ class HeartFChatting:
         self.context.relationship_builder = relationship_builder_manager.get_or_create_builder(self.context.stream_id)
         self.context.expression_learner = expression_learner_manager.get_expression_learner(self.context.stream_id)

-        await self.energy_manager.start()
+        #await self.energy_manager.start()
         await self.proactive_thinker.start()
         await self.wakeup_manager.start()
@@ -120,7 +116,7 @@ class HeartFChatting:
             return

         self.context.running = False
-        await self.energy_manager.stop()
+        #await self.energy_manager.stop()
         await self.proactive_thinker.stop()
         await self.wakeup_manager.stop()
@@ -245,8 +241,6 @@
         # 统一使用 _should_process_messages 判断是否应该处理
         should_process, interest_value = await self._should_process_messages(recent_messages if has_new_messages else None)

         if should_process:
-            #earliest_message_data = recent_messages[0]
-            #self.last_read_time = earliest_message_data.get("time")
             self.context.last_read_time = time.time()
             await self.cycle_processor.observe(interest_value=interest_value)
         else:
@@ -418,6 +412,7 @@
         # talk_frequency = global_config.chat.get_current_talk_frequency(self.context.chat_stream.stream_id)
         modified_exit_count_threshold = self.context.focus_energy / global_config.chat.focus_value
+        modified_exit_interest_threshold = 3 / global_config.chat.focus_value
         total_interest = 0.0
         for msg_dict in new_message:
             interest_value = msg_dict.get("interest_value", 0.0)
@@ -441,11 +436,11 @@
         if not hasattr(self, "_last_accumulated_interest") or total_interest != self._last_accumulated_interest:
             logger.info(f"{self.context.log_prefix} breaking形式,当前累计兴趣值: {total_interest:.2f}, 专注度: {global_config.chat.focus_value:.1f}")
             self._last_accumulated_interest = total_interest

-        if total_interest >= 3 / global_config.chat.focus_value:
+        if total_interest >= modified_exit_interest_threshold:
             # 记录兴趣度到列表
             self.recent_interest_records.append(total_interest)
             logger.info(
-                f"{self.context.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{3 / global_config.chat.focus_value}),结束等待"
+                f"{self.context.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{modified_exit_interest_threshold:.1f}),结束等待"
             )
             return True, total_interest / new_message_count
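The exit threshold is now computed once as modified_exit_interest_threshold and reused in both the comparison and the log message (which also gets a :.1f format the old string lacked). The relationship is inverse: a higher focus_value means less accumulated interest is needed to stop waiting. A quick check of the formula:

    def exit_threshold(focus_value: float) -> float:
        # mirrors modified_exit_interest_threshold = 3 / global_config.chat.focus_value
        return 3 / focus_value

    assert exit_threshold(1.0) == 3.0   # default: wait until 3.0 accumulated interest
    assert exit_threshold(2.0) == 1.5   # higher focus -> breaks out of waiting sooner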

View File

@@ -1,84 +0,0 @@
-import random
-from typing import Dict, Any, TYPE_CHECKING
-
-from src.common.logger import get_logger
-from src.config.config import global_config
-from src.chat.willing.willing_manager import get_willing_manager
-
-from .hfc_context import HfcContext
-
-if TYPE_CHECKING:
-    from .cycle_processor import CycleProcessor
-
-logger = get_logger("hfc.normal_mode")
-
-
-class NormalModeHandler:
-    def __init__(self, context: HfcContext, cycle_processor: "CycleProcessor"):
-        """
-        初始化普通模式处理器
-
-        Args:
-            context: HFC聊天上下文对象
-            cycle_processor: 循环处理器,用于处理决定回复的消息
-
-        功能说明:
-        - 处理NORMAL模式下的消息
-        - 根据兴趣度和回复概率决定是否回复
-        - 管理意愿系统和回复概率计算
-        """
-        self.context = context
-        self.cycle_processor = cycle_processor
-        self.willing_manager = get_willing_manager()
-
-    async def handle_message(self, message_data: Dict[str, Any]) -> bool:
-        """
-        处理NORMAL模式下的单条消息
-
-        Args:
-            message_data: 消息数据字典,包含用户信息、消息内容、兴趣值等
-
-        Returns:
-            bool: 是否进行了回复处理
-
-        功能说明:
-        - 计算消息的兴趣度和基础回复概率
-        - 应用谈话频率调整回复概率
-        - 过滤表情和图片消息,设置回复概率为0
-        - 根据概率随机决定是否回复
-        - 如果决定回复则调用循环处理器进行处理
-        - 记录详细的决策日志
-        """
-        if not self.context.chat_stream:
-            return False
-
-        interested_rate = message_data.get("interest_value") or 0.0
-        self.willing_manager.setup(message_data, self.context.chat_stream)
-        reply_probability = await self.willing_manager.get_reply_probability(message_data.get("message_id", ""))
-
-        if reply_probability < 1:
-            additional_config = message_data.get("additional_config", {})
-            if additional_config and "maimcore_reply_probability_gain" in additional_config:
-                reply_probability += additional_config["maimcore_reply_probability_gain"]
-                reply_probability = min(max(reply_probability, 0), 1)
-
-        talk_frequency = global_config.chat.get_current_talk_frequency(self.context.stream_id)
-        reply_probability = talk_frequency * reply_probability
-
-        if message_data.get("is_emoji") or message_data.get("is_picid"):
-            reply_probability = 0
-
-        mes_name = self.context.chat_stream.group_info.group_name if self.context.chat_stream.group_info else "私聊"
-
-        if reply_probability > 0.05:
-            logger.info(
-                f"[{mes_name}]"
-                f"{message_data.get('user_nickname')}:"
-                f"{message_data.get('processed_plain_text')}[兴趣:{interested_rate:.2f}][回复概率:{reply_probability * 100:.1f}%]"
-            )
-
-        if random.random() < reply_probability:
-            await self.willing_manager.before_generate_reply_handle(message_data.get("message_id", ""))
-            await self.cycle_processor.observe(message_data=message_data)
-            return True
-
-        self.willing_manager.delete(message_data.get("message_id", ""))
-        return False
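For the record, the deleted NORMAL-mode gate combined willingness, an optional per-message gain, and the stream's talk frequency multiplicatively before rolling the dice. A worked example with assumed inputs (none of these numbers come from the diff):

    reply_probability = 0.4              # assumed output of willing_manager.get_reply_probability
    reply_probability += 0.3             # assumed maimcore_reply_probability_gain from additional_config
    reply_probability = min(max(reply_probability, 0), 1)  # clamp -> 0.7
    reply_probability *= 0.5             # assumed talk_frequency for this stream -> 0.35
    # emoji/picid messages were forced to 0; otherwise reply with probability 0.35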

View File

@@ -105,7 +105,7 @@ class ResponseHandler:
         return loop_info, reply_text, cycle_timers

-    async def send_response(self, reply_set, reply_to, thinking_start_time, message_data) -> str:
+    async def send_response(self, reply_set, thinking_start_time, message_data) -> str:
         """
         发送回复内容的具体实现
@@ -129,9 +129,6 @@ class ResponseHandler:
         new_message_count = message_api.count_new_messages(
             chat_id=self.context.stream_id, start_time=thinking_start_time, end_time=current_time
         )
-        platform = message_data.get("user_platform", "")
-        user_id = message_data.get("user_id", "")
-        reply_to_platform_id = f"{platform}:{user_id}"

         need_reply = new_message_count >= random.randint(2, 4)
@@ -157,32 +154,26 @@ class ResponseHandler:
                 continue

             if not first_replied:
-                if need_reply:
-                    await send_api.text_to_stream(
-                        text=data,
-                        stream_id=self.context.stream_id,
-                        reply_to=reply_to,
-                        reply_to_platform_id=reply_to_platform_id,
-                        typing=False,
-                    )
-                else:
-                    await send_api.text_to_stream(
-                        text=data,
-                        stream_id=self.context.stream_id,
-                        reply_to_platform_id=reply_to_platform_id,
-                        typing=False,
-                    )
+                await send_api.text_to_stream(
+                    text=data,
+                    stream_id=self.context.stream_id,
+                    reply_to_message=message_data,
+                    set_reply=need_reply,
+                    typing=False,
+                )
                 first_replied = True
             else:
                 await send_api.text_to_stream(
                     text=data,
                     stream_id=self.context.stream_id,
-                    reply_to_platform_id=reply_to_platform_id,
+                    reply_to_message=message_data,
+                    set_reply=need_reply,
                     typing=True,
                 )

         return reply_text

+    # TODO: 已废弃
     async def generate_response(
         self,
         message_data: dict,
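The old need_reply branching collapses into one call: set_reply decides whether the message quotes its trigger, and reply_to_message carries the original message dict in place of the reply_to / reply_to_platform_id string pair. A minimal sketch, assuming send_api keeps the keyword names shown above:

    await send_api.text_to_stream(
        text=data,
        stream_id=self.context.stream_id,   # target chat stream
        reply_to_message=message_data,      # full message dict, not "platform:user_id"
        set_reply=need_reply,               # quote the trigger only when the chat has moved on
        typing=False,                       # the diff uses typing=True for follow-up segments
    )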

View File

@@ -83,7 +83,7 @@ def init_prompt() -> None:
 class ExpressionLearner:
     def __init__(self, chat_id: str) -> None:
         self.express_learn_model: LLMRequest = LLMRequest(
-            model_set=model_config.model_task_config.replyer_1, request_type="expressor.learner"
+            model_set=model_config.model_task_config.replyer, request_type="expressor.learner"
         )
         self.chat_id = chat_id
         self.chat_name = get_chat_manager().get_stream_name(chat_id) or chat_id

View File

@@ -48,6 +48,19 @@ def init_prompt():
 {actions_before_now_block}
 {no_action_block}

+动作:reply
+动作描述:参与聊天回复,发送文本进行表达
+- 你想要闲聊或者随便附和
+- {mentioned_bonus}
+- 如果你刚刚进行了回复,不要对同一个话题重复回应
+- 不要回复自己发送的消息
+{{
+    "action": "reply",
+    "target_message_id":"触发action的消息id",
+    "reason":"回复的原因"
+}}
+
 {action_options_text}

 你必须从上面列出的可用action中选择一个,并说明触发action的消息id(不是消息原文)和选择该action的原因。
@@ -302,12 +315,11 @@ class ActionPlanner:
                     if key not in ["action", "reason"]:
                         action_data[key] = value

-            # 在FOCUS模式下,非no_reply动作需要target_message_id
+            # 非no_reply动作需要target_message_id
             if action != "no_reply":
                 if target_message_id := parsed_json.get("target_message_id"):
                     # 根据target_message_id查找原始消息
                     target_message = self.find_message_by_id(target_message_id, message_id_list)
-                    # target_message = None

                     # 如果获取的target_message为None,输出warning并重新plan
                     if target_message is None:
                         self.plan_retry_count += 1
@@ -320,7 +332,7 @@ class ActionPlanner:
                             self.plan_retry_count = 0  # 重置计数器
                         else:
                             # 递归重新plan
-                            return await self.plan(mode)
+                            return await self.plan(mode, loop_start_time, available_actions)
                     else:
                         # 成功获取到target_message,重置计数器
                         self.plan_retry_count = 0
@@ -328,9 +340,7 @@ class ActionPlanner:
                     logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id")

-            if action == "no_action":
-                reasoning = "normal决定不使用额外动作"
-            elif action != "no_reply" and action != "reply" and action not in current_available_actions:
+            if action != "no_reply" and action != "reply" and action not in current_available_actions:
                 logger.warning(
                     f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
                 )
@@ -470,34 +480,15 @@ class ActionPlanner:
             mentioned_bonus = "\n- 有人提到你,或者at你"

         if mode == ChatMode.FOCUS:
-            no_action_block = f"""重要说明:
-- 'no_reply' 表示只进行不进行回复,等待合适的回复时机
+            no_action_block = """重要说明:
+- 'no_reply' 表示不进行回复,等待合适的回复时机
 - 当你刚刚发送了消息,没有人回复时,选择no_reply
 - 当你一次发送了太多消息,为了避免打扰聊天节奏,选择no_reply
-
-动作:reply
-动作描述:参与聊天回复,发送文本进行表达
-- 你想要闲聊或者随便附和{mentioned_bonus}
-- 如果你刚刚进行了回复,不要对同一个话题重复回应
-- 不要回复自己发送的消息
-{{
-    "action": "reply",
-    "target_message_id":"触发action的消息id",
-    "reason":"回复的原因"
-}}
 """
         else:  # NORMAL Mode
-            no_action_block = f"""重要说明:
+            no_action_block = """重要说明:
 - 'reply' 表示只进行普通聊天回复,不执行任何额外动作
 - 其他action表示在普通回复的基础上执行相应的额外动作
-
-动作:reply
-动作描述:参与聊天回复,发送文本进行表达
-- 你想要闲聊或者随便附和
-- {mentioned_bonus}
-- 如果你刚刚进行了回复,不要对同一个话题重复回应
-- 不要回复自己发送的消息
 {{
     "action": "reply",
     "target_message_id":"触发action的消息id",
@@ -505,6 +496,7 @@ class ActionPlanner:
             }}"""

         chat_context_description = "你现在正在一个群聊中"
+        chat_target_name = None
         if not is_group_chat and chat_target_info:
             chat_target_name = (
                 chat_target_info.get("person_name")
@@ -535,6 +527,7 @@ class ActionPlanner:
             chat_context_description=chat_context_description,
             chat_content_block=chat_content_block,
             actions_before_now_block=actions_before_now_block,
+            mentioned_bonus=mentioned_bonus,
             no_action_block=no_action_block,
             action_options_text=action_options_block,
             moderation_prompt=moderation_prompt_block,
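With the reply action declared once in the shared template (and mentioned_bonus passed as its own format argument), both FOCUS and NORMAL mode expect the same decision shape back from the LLM. An illustrative well-formed decision, written as the dict the planner parses (values are made up):

    decision = {
        "action": "reply",
        "target_message_id": "m123",      # id of the triggering message, not its text
        "reason": "对方直接提问,需要回应",
    }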

View File

@@ -227,37 +227,9 @@ class DefaultReplyer:
     def __init__(
         self,
         chat_stream: ChatStream,
-        model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
-        request_type: str = "focus.replyer",
+        request_type: str = "replyer",
     ):
-        self.request_type = request_type
-
-        if model_set_with_weight:
-            # self.express_model_configs = model_configs
-            self.model_set: List[Tuple[TaskConfig, float]] = model_set_with_weight
-        else:
-            # 当未提供配置时,使用默认配置并赋予默认权重
-            # model_config_1 = global_config.model.replyer_1.copy()
-            # model_config_2 = global_config.model.replyer_2.copy()
-            prob_first = global_config.chat.replyer_random_probability
-            # model_config_1["weight"] = prob_first
-            # model_config_2["weight"] = 1.0 - prob_first
-            # self.express_model_configs = [model_config_1, model_config_2]
-
-            self.model_set = [
-                (model_config.model_task_config.replyer_1, prob_first),
-                (model_config.model_task_config.replyer_2, 1.0 - prob_first),
-            ]
-
-        # if not self.express_model_configs:
-        #     logger.warning("未找到有效的模型配置,回复生成可能会失败。")
-        #     # 提供一个最终的回退,以防止在空列表上调用 random.choice
-        #     fallback_config = global_config.model.replyer_1.copy()
-        #     fallback_config.setdefault("weight", 1.0)
-        #     self.express_model_configs = [fallback_config]
+        self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
         self.chat_stream = chat_stream
         self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
@@ -270,14 +242,6 @@ class DefaultReplyer:
         self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id)

-    def _select_weighted_models_config(self) -> Tuple[TaskConfig, float]:
-        """使用加权随机选择来挑选一个模型配置"""
-        configs = self.model_set
-
-        # 提取权重,如果模型配置中没有'weight'键则默认为1.0
-        weights = [weight for _, weight in configs]
-
-        return random.choices(population=configs, weights=weights, k=1)[0]
-
     async def generate_reply_with_context(
         self,
         reply_to: str = "",
@@ -286,6 +250,7 @@ class DefaultReplyer:
         enable_tool: bool = True,
         from_plugin: bool = True,
         stream_id: Optional[str] = None,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str]]:
         # sourcery skip: merge-nested-ifs
         """
@@ -313,6 +278,7 @@ class DefaultReplyer:
             extra_info=extra_info,
             available_actions=available_actions,
             enable_tool=enable_tool,
+            reply_message=reply_message,
         )

         if not prompt:
@@ -590,7 +556,7 @@ class DefaultReplyer:
         # 只有当完全没有任何记忆时才返回空字符串
         return memory_str if has_any_memory else ""

-    async def build_tool_info(self, chat_history: str, reply_to: str = "", enable_tool: bool = True) -> str:
+    async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
         """构建工具信息块

         Args:
@@ -605,18 +571,10 @@ class DefaultReplyer:
         if not enable_tool:
             return ""

-        if not reply_to:
-            return ""
-
-        sender, text = self._parse_reply_target(reply_to)
-        if not text:
-            return ""
-
         try:
             # 使用工具执行器获取信息
             tool_results, _, _ = await self.tool_executor.execute_from_chat_message(
-                sender=sender, target_message=text, chat_history=chat_history, return_details=False
+                sender=sender, target_message=target, chat_history=chat_history, return_details=False
             )

             if tool_results:
@@ -826,7 +784,8 @@ class DefaultReplyer:
         extra_info: str = "",
         available_actions: Optional[Dict[str, ActionInfo]] = None,
         enable_tool: bool = True,
-    ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
+        reply_message: Optional[Dict[str, Any]] = None,
+    ) -> str:
         """
         构建回复器上下文
@@ -836,6 +795,7 @@ class DefaultReplyer:
             available_actions: 可用动作
             enable_timeout: 是否启用超时处理
             enable_tool: 是否启用工具调用
+            reply_message: 回复的原始消息

         Returns:
             str: 构建好的上下文
@@ -858,7 +818,20 @@ class DefaultReplyer:
         else:
             mood_prompt = ""

-        sender, target = self._parse_reply_target(reply_to)
+        if reply_to:
+            # 兼容旧的reply_to
+            sender, target = self._parse_reply_target(reply_to)
+        else:
+            # 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
+            platform = reply_message.get("chat_info_platform")
+            person_id = person_info_manager.get_person_id(
+                platform,  # type: ignore
+                reply_message.get("user_id"),  # type: ignore
+            )
+            person_name = await person_info_manager.get_value(person_id, "person_name")
+            sender = person_name
+            target = reply_message.get("processed_plain_text")

         person_info_manager = get_person_info_manager()
         person_id = person_info_manager.get_person_id_by_person_name(sender)
         user_id = person_info_manager.get_value_sync(person_id, "user_id")
@@ -908,12 +881,12 @@ class DefaultReplyer:
             self._time_and_run_task(
                 self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
             ),
-            self._time_and_run_task(self.build_relation_info(reply_to), "relation_info"),
+            self._time_and_run_task(self.build_relation_info(sender, target), "relation_info"),
             self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
             self._time_and_run_task(
-                self.build_tool_info(chat_talking_prompt_short, reply_to, enable_tool=enable_tool), "tool_info"
+                self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
            ),
-            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, reply_to), "prompt_info"),
+            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
             self._time_and_run_task(
                 PromptUtils.build_cross_context(chat_id, target_user_info, global_config.personality.prompt_mode),
                 "cross_context",
@@ -1046,12 +1019,17 @@ class DefaultReplyer:
         raw_reply: str,
         reason: str,
         reply_to: str,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
         chat_stream = self.chat_stream
         chat_id = chat_stream.stream_id
         is_group_chat = bool(chat_stream.group_info)

-        sender, target = self._parse_reply_target(reply_to)
+        if reply_message:
+            sender = reply_message.get("sender")
+            target = reply_message.get("target")
+        else:
+            sender, target = self._parse_reply_target(reply_to)

         # 添加情绪状态获取
         if global_config.mood.enable_mood:
@@ -1082,7 +1060,7 @@ class DefaultReplyer:
         # 并行执行2个构建任务
         expression_habits_block, relation_info = await asyncio.gather(
             self.build_expression_habits(chat_talking_prompt_half, target),
-            self.build_relation_info(reply_to),
+            self.build_relation_info(sender, target),
         )

         keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
@@ -1195,34 +1173,24 @@ class DefaultReplyer:
     async def llm_generate_content(self, prompt: str):
         with Timer("LLM生成", {}):  # 内部计时器,可选保留
-            # 加权随机选择一个模型配置
-            selected_model_config, weight = self._select_weighted_models_config()
-            logger.info(f"使用模型集生成回复: {selected_model_config} (选中概率: {weight})")
-
-            express_model = LLMRequest(model_set=selected_model_config, request_type=self.request_type)
+            # 直接使用已初始化的模型实例
+            logger.info(f"使用模型集生成回复: {self.express_model.model_for_task}")

             if global_config.debug.show_prompt:
                 logger.info(f"\n{prompt}\n")
             else:
                 logger.debug(f"\n{prompt}\n")

-            content, (reasoning_content, model_name, tool_calls) = await express_model.generate_response_async(prompt)
+            content, (reasoning_content, model_name, tool_calls) = await self.express_model.generate_response_async(prompt)

             logger.debug(f"replyer生成内容: {content}")

         return content, reasoning_content, model_name, tool_calls

-    async def get_prompt_info(self, message: str, reply_to: str):
+    async def get_prompt_info(self, message: str, sender: str, target: str):
         related_info = ""
         start_time = time.time()

         from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool

-        if not reply_to:
-            logger.debug("没有回复对象,跳过获取知识库内容")
-            return ""
-
-        sender, content = self._parse_reply_target(reply_to)
-        if not content:
-            logger.debug("回复对象内容为空,跳过获取知识库内容")
-            return ""
-
         logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
         # 从LPMM知识库获取知识
         try:
@@ -1240,7 +1208,7 @@ class DefaultReplyer:
                 time_now=time_now,
                 chat_history=message,
                 sender=sender,
-                target_message=content,
+                target_message=target,
             )
             _, _, _, _, tool_calls = await llm_api.generate_with_model_with_tools(
                 prompt,
@@ -1269,16 +1237,11 @@ class DefaultReplyer:
             logger.error(f"获取知识库内容时发生异常: {str(e)}")
             return ""

-    async def build_relation_info(self, reply_to: str = ""):
+    async def build_relation_info(self, sender: str, target: str):
         if not global_config.relationship.enable_relationship:
             return ""

         relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
-        if not reply_to:
-            return ""
-        sender, text = self._parse_reply_target(reply_to)
-        if not sender or not text:
-            return ""

         # 获取用户ID
         person_info_manager = get_person_info_manager()
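DefaultReplyer thus drops the weighted two-model pool (replyer_1/replyer_2 mixed by replyer_random_probability) in favor of a single LLMRequest bound at construction, and every helper that used to re-parse a "用户名:消息内容" reply_to string (build_relation_info, build_tool_info, get_prompt_info) now receives sender and target directly. A minimal usage sketch under the signatures shown in this diff:

    replyer = DefaultReplyer(chat_stream=chat_stream, request_type="replyer")
    # one model instance per replyer; no per-call random.choices over weighted TaskConfigs
    content, reasoning, model_name, tool_calls = await replyer.llm_generate_content(prompt)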

View File

@@ -16,7 +16,6 @@ class ReplyerManager:
         self,
         chat_stream: Optional[ChatStream] = None,
         chat_id: Optional[str] = None,
-        model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
         request_type: str = "replyer",
     ) -> Optional[DefaultReplyer]:
         """
@@ -50,7 +49,6 @@ class ReplyerManager:
             # model_configs 只在此时(初始化时)生效
             replyer = DefaultReplyer(
                 chat_stream=target_stream,
-                model_set_with_weight=model_set_with_weight,  # 可以是None,此时使用默认模型
                 request_type=request_type,
             )
             self._repliers[stream_id] = replyer

View File

@@ -1,60 +0,0 @@
-import asyncio
-
-from src.config.config import global_config
-
-from .willing_manager import BaseWillingManager
-
-
-class ClassicalWillingManager(BaseWillingManager):
-    def __init__(self):
-        super().__init__()
-        self._decay_task: asyncio.Task | None = None
-
-    async def _decay_reply_willing(self):
-        """定期衰减回复意愿"""
-        while True:
-            await asyncio.sleep(1)
-            for chat_id in self.chat_reply_willing:
-                self.chat_reply_willing[chat_id] = max(0.0, self.chat_reply_willing[chat_id] * 0.9)
-
-    async def async_task_starter(self):
-        if self._decay_task is None:
-            self._decay_task = asyncio.create_task(self._decay_reply_willing())
-
-    async def get_reply_probability(self, message_id):
-        willing_info = self.ongoing_messages[message_id]
-        chat_id = willing_info.chat_id
-        current_willing = self.chat_reply_willing.get(chat_id, 0)
-        # print(f"[{chat_id}] 回复意愿: {current_willing}")
-
-        interested_rate = willing_info.interested_rate
-        # print(f"[{chat_id}] 兴趣值: {interested_rate}")
-        current_willing += interested_rate
-
-        if willing_info.is_mentioned_bot and global_config.chat.mentioned_bot_inevitable_reply and current_willing < 2:
-            current_willing += 1 if current_willing < 1.0 else 0.2
-
-        self.chat_reply_willing[chat_id] = min(current_willing, 1.0)
-
-        reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1.5)
-        # print(f"[{chat_id}] 回复概率: {reply_probability}")
-        return reply_probability
-
-    async def before_generate_reply_handle(self, message_id):
-        pass
-
-    async def after_generate_reply_handle(self, message_id):
-        if message_id not in self.ongoing_messages:
-            return
-        chat_id = self.ongoing_messages[message_id].chat_id
-        current_willing = self.chat_reply_willing.get(chat_id, 0)
-        if current_willing < 1:
-            self.chat_reply_willing[chat_id] = min(1.0, current_willing + 0.3)
-
-    async def not_reply_handle(self, message_id):
-        return await super().not_reply_handle(message_id)
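For context on what was removed: the classical manager decayed each stream's willingness by 10% per second and mapped willingness to probability linearly. A worked pass through get_reply_probability with assumed inputs:

    current_willing = 0.4 + 0.5   # assumed base willingness + message interest -> 0.9
    current_willing += 1          # mention bonus (0.9 < 1.0 and mentioned_bot_inevitable_reply on) -> 1.9
    # stored willingness is capped: chat_reply_willing[chat_id] = min(1.9, 1.0)
    reply_probability = min(max(current_willing - 0.5, 0.01) * 2, 1.5)  # -> 1.5, i.e. a guaranteed reply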

View File

@@ -1,24 +0,0 @@
from .willing_manager import BaseWillingManager
NOT_IMPLEMENTED_MESSAGE = "\ncustom模式你实现了吗没自行实现不要选custom。给你退了快点给你麦爹配置\n以上内容由gemini生成如有不满请投诉gemini"
class CustomWillingManager(BaseWillingManager):
async def async_task_starter(self) -> None:
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
async def before_generate_reply_handle(self, message_id: str):
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
async def after_generate_reply_handle(self, message_id: str):
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
async def not_reply_handle(self, message_id: str):
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
async def get_reply_probability(self, message_id: str):
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
def __init__(self):
super().__init__()
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)

View File

@@ -1,296 +0,0 @@
-"""
-Mxp 模式:梦溪畔独家赞助
-此模式的一些参数不会在配置文件中显示,要修改请在可变参数下修改
-同时一些全局设置对此模式无效
-此模式的可变参数暂时比较草率,需要调参仙人的大手
-
-此模式的特点:
-1.每个聊天流的每个用户的意愿是独立的
-2.接入关系系统,关系会影响意愿值(已移除,因为关系系统重构)
-3.会根据群聊的热度来调整基础意愿值
-4.限制同时思考的消息数量,防止喷射
-5.拥有单聊增益,无论在群里还是私聊,只要bot一直和你聊就会增加意愿值
-6.意愿分为衰减意愿+临时意愿
-7.疲劳机制
-
-如果你发现本模式出现了bug
-上上策是询问智慧的小草神()
-上策是询问万能的千石可乐
-中策是发issue
-下下策是询问一个菜鸟(@梦溪畔)
-"""
-
-from typing import Dict
-import asyncio
-import time
-import math
-
-from src.chat.message_receive.chat_stream import ChatStream
-
-from .willing_manager import BaseWillingManager
-
-
-class MxpWillingManager(BaseWillingManager):
-    """Mxp意愿管理器"""
-
-    def __init__(self):
-        super().__init__()
-        self.chat_person_reply_willing: Dict[str, Dict[str, float]] = {}  # chat_id: {person_id: 意愿值}
-        self.chat_new_message_time: Dict[str, list[float]] = {}  # 聊天流ID: 消息时间
-        self.last_response_person: Dict[str, tuple[str, int]] = {}  # 上次回复的用户信息
-        self.temporary_willing: float = 0  # 临时意愿值
-        self.chat_bot_message_time: Dict[str, list[float]] = {}  # 聊天流ID: bot已回复消息时间
-        self.chat_fatigue_punishment_list: Dict[
-            str, list[tuple[float, float]]
-        ] = {}  # 聊天流疲劳惩罚列, 聊天流ID: 惩罚时间列(开始时间,持续时间)
-        self.chat_fatigue_willing_attenuation: Dict[str, float] = {}  # 聊天流疲劳意愿衰减值
-
-        # 可变参数
-        self.intention_decay_rate = 0.93  # 意愿衰减率
-        self.number_of_message_storage = 12  # 消息存储数量
-        self.expected_replies_per_min = 3  # 每分钟预期回复数
-        self.basic_maximum_willing = 0.5  # 基础最大意愿值
-        self.mention_willing_gain = 0.6  # 提及意愿增益
-        self.interest_willing_gain = 0.3  # 兴趣意愿增益
-        self.single_chat_gain = 0.12  # 单聊增益
-        self.fatigue_messages_triggered_num = self.expected_replies_per_min  # 疲劳消息触发数量(int)
-        self.fatigue_coefficient = 1.0  # 疲劳系数
-
-        self.is_debug = False  # 是否开启调试模式
-
-    async def async_task_starter(self) -> None:
-        """异步任务启动器"""
-        asyncio.create_task(self._return_to_basic_willing())
-        asyncio.create_task(self._chat_new_message_to_change_basic_willing())
-        asyncio.create_task(self._fatigue_attenuation())
-
-    async def before_generate_reply_handle(self, message_id: str):
-        """回复前处理"""
-        current_time = time.time()
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            if w_info.chat_id not in self.chat_bot_message_time:
-                self.chat_bot_message_time[w_info.chat_id] = []
-            self.chat_bot_message_time[w_info.chat_id] = [
-                t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60
-            ]
-            self.chat_bot_message_time[w_info.chat_id].append(current_time)
-            if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
-                time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
-                self.chat_fatigue_punishment_list[w_info.chat_id].append((current_time, time_interval * 2))
-
-    async def after_generate_reply_handle(self, message_id: str):
-        """回复后处理"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            # 移除关系值相关代码
-            # rel_value = await w_info.person_info_manager.get_value(w_info.person_id, "relationship_value")
-            # rel_level = self._get_relationship_level_num(rel_value)
-            # self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += rel_level * 0.05
-
-            now_chat_new_person = self.last_response_person.get(w_info.chat_id, (w_info.person_id, 0))
-            if now_chat_new_person[0] == w_info.person_id:
-                if now_chat_new_person[1] < 3:
-                    tmp_list = list(now_chat_new_person)
-                    tmp_list[1] += 1  # type: ignore
-                    self.last_response_person[w_info.chat_id] = tuple(tmp_list)  # type: ignore
-            else:
-                self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
-
-    async def not_reply_handle(self, message_id: str):
-        """不回复处理"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            if w_info.is_mentioned_bot:
-                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.mention_willing_gain / 2.5
-            if (
-                w_info.chat_id in self.last_response_person
-                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
-                and self.last_response_person[w_info.chat_id][1]
-            ):
-                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.single_chat_gain * (
-                    2 * self.last_response_person[w_info.chat_id][1] - 1
-                )
-            now_chat_new_person = self.last_response_person.get(w_info.chat_id, ("", 0))
-            if now_chat_new_person[0] != w_info.person_id:
-                self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
-
-    async def get_reply_probability(self, message_id: str):
-        # sourcery skip: merge-duplicate-blocks, remove-redundant-if
-        """获取回复概率"""
-        async with self.lock:
-            w_info = self.ongoing_messages[message_id]
-            current_willing = self.chat_person_reply_willing[w_info.chat_id][w_info.person_id]
-            if self.is_debug:
-                self.logger.debug(f"基础意愿值:{current_willing}")
-
-            if w_info.is_mentioned_bot:
-                willing_gain = self.mention_willing_gain / (int(current_willing) + 1)
-                current_willing += willing_gain
-                if self.is_debug:
-                    self.logger.debug(f"提及增益:{willing_gain}")
-
-            if w_info.interested_rate > 0:
-                willing_gain = math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
-                current_willing += willing_gain
-                if self.is_debug:
-                    self.logger.debug(f"兴趣增益:{willing_gain}")
-
-            self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing
-
-            # 添加单聊增益
-            if (
-                w_info.chat_id in self.last_response_person
-                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
-                and self.last_response_person[w_info.chat_id][1]
-            ):
-                current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
-                if self.is_debug:
-                    self.logger.debug(
-                        f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}"
-                    )
-
-            current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
-            if self.is_debug:
-                self.logger.debug(f"疲劳衰减:{self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)}")
-
-            chat_ongoing_messages = [msg for msg in self.ongoing_messages.values() if msg.chat_id == w_info.chat_id]
-            chat_person_ongoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
-            if len(chat_person_ongoing_messages) >= 2:
-                current_willing = 0
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚归0")
-            elif len(chat_ongoing_messages) == 2:
-                current_willing -= 0.5
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:-0.5")
-            elif len(chat_ongoing_messages) == 3:
-                current_willing -= 1.5
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚:-1.5")
-            elif len(chat_ongoing_messages) >= 4:
-                current_willing = 0
-                if self.is_debug:
-                    self.logger.debug("进行中消息惩罚归0")
-
-            probability = self._willing_to_probability(current_willing)
-            self.temporary_willing = current_willing
-            return probability
-
-    async def _return_to_basic_willing(self):
-        """使每个人的意愿恢复到chat基础意愿"""
-        while True:
-            await asyncio.sleep(3)
-            async with self.lock:
-                for chat_id, person_willing in self.chat_person_reply_willing.items():
-                    for person_id, willing in person_willing.items():
-                        if chat_id not in self.chat_reply_willing:
-                            self.logger.debug(f"聊天流{chat_id}不存在,错误")
-                            continue
-                        basic_willing = self.chat_reply_willing[chat_id]
-                        person_willing[person_id] = (
-                            basic_willing + (willing - basic_willing) * self.intention_decay_rate
-                        )
-
-    def setup(self, message: dict, chat_stream: ChatStream):
-        super().setup(message, chat_stream)
-        stream_id = chat_stream.stream_id
-        self.chat_reply_willing[stream_id] = self.chat_reply_willing.get(stream_id, self.basic_maximum_willing)
-        self.chat_person_reply_willing[stream_id] = self.chat_person_reply_willing.get(stream_id, {})
-        self.chat_person_reply_willing[stream_id][self.ongoing_messages[message.get("message_id", "")].person_id] = (
-            self.chat_person_reply_willing[stream_id].get(
-                self.ongoing_messages[message.get("message_id", "")].person_id,
-                self.chat_reply_willing[stream_id],
-            )
-        )
-
-        current_time = time.time()
-        if stream_id not in self.chat_new_message_time:
-            self.chat_new_message_time[stream_id] = []
-        self.chat_new_message_time[stream_id].append(current_time)
-        if len(self.chat_new_message_time[stream_id]) > self.number_of_message_storage:
-            self.chat_new_message_time[stream_id].pop(0)
-
-        if stream_id not in self.chat_fatigue_punishment_list:
-            self.chat_fatigue_punishment_list[stream_id] = [
-                (
-                    current_time,
-                    self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60,
-                )
-            ]
-            self.chat_fatigue_willing_attenuation[stream_id] = (
-                -2 * self.basic_maximum_willing * self.fatigue_coefficient
-            )
-
-    @staticmethod
-    def _willing_to_probability(willing: float) -> float:
-        """意愿值转化为概率"""
-        willing = max(0, willing)
-        if willing < 2:
-            return math.atan(willing * 2) / math.pi * 2
-        elif willing < 2.5:
-            return math.atan(willing * 4) / math.pi * 2
-        else:
-            return 1
-
-    async def _chat_new_message_to_change_basic_willing(self):
-        """聊天流新消息改变基础意愿"""
-        update_time = 20
-        while True:
-            await asyncio.sleep(update_time)
-            async with self.lock:
-                for chat_id, message_times in self.chat_new_message_time.items():
-                    # 清理过期消息
-                    current_time = time.time()
-                    message_times = [
-                        msg_time
-                        for msg_time in message_times
-                        if current_time - msg_time
-                        < self.number_of_message_storage
-                        * self.basic_maximum_willing
-                        / self.expected_replies_per_min
-                        * 60
-                    ]
-                    self.chat_new_message_time[chat_id] = message_times
-
-                    if len(message_times) < self.number_of_message_storage:
-                        self.chat_reply_willing[chat_id] = self.basic_maximum_willing
-                        update_time = 20
-                    elif len(message_times) == self.number_of_message_storage:
-                        time_interval = current_time - message_times[0]
-                        basic_willing = self._basic_willing_calculate(time_interval)
-                        self.chat_reply_willing[chat_id] = basic_willing
-                        update_time = 17 * basic_willing / self.basic_maximum_willing + 3
-                    else:
-                        self.logger.debug(f"聊天流{chat_id}消息时间数量异常,数量:{len(message_times)}")
-                        self.chat_reply_willing[chat_id] = 0
-
-            if self.is_debug:
-                self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")
-
-    def _basic_willing_calculate(self, t: float) -> float:
-        """基础意愿值计算"""
-        return math.tan(t * self.expected_replies_per_min * math.pi / 120 / self.number_of_message_storage) / 2
-
-    async def _fatigue_attenuation(self):
-        """疲劳衰减"""
-        while True:
-            await asyncio.sleep(1)
-            current_time = time.time()
-            async with self.lock:
-                for chat_id, fatigue_list in self.chat_fatigue_punishment_list.items():
-                    fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
-                    self.chat_fatigue_willing_attenuation[chat_id] = 0
-                    for start_time, duration in fatigue_list:
-                        self.chat_fatigue_willing_attenuation[chat_id] += (
-                            self.chat_reply_willing[chat_id]
-                            * 2
-                            / math.pi
-                            * math.asin(2 * (current_time - start_time) / duration - 1)
-                            - self.chat_reply_willing[chat_id]
-                        ) * self.fatigue_coefficient
-
-    async def get_willing(self, chat_id):
-        return self.temporary_willing
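The removed willingness-to-probability mapping was a piecewise arctangent; replaying it at a few points shows the shape of the curve:

    import math

    def willing_to_probability(willing: float) -> float:
        # same piecewise curve as the deleted MxpWillingManager._willing_to_probability
        willing = max(0, willing)
        if willing < 2:
            return math.atan(willing * 2) / math.pi * 2
        elif willing < 2.5:
            return math.atan(willing * 4) / math.pi * 2
        return 1

    # willing_to_probability(0.5) ≈ 0.50, (1.0) ≈ 0.70, (2.0) ≈ 0.92, (2.5+) = 1.0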

View File

@@ -1,180 +0,0 @@
import importlib
import asyncio
from abc import ABC, abstractmethod
from typing import Dict, Optional, Any
from rich.traceback import install
from dataclasses import dataclass
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
install(extra_lines=3)
"""
基类方法概览:
以下8个方法是你必须在子类重写的哪怕什么都不干
async_task_starter 在程序启动时执行在其中用asyncio.create_task启动你想要执行的异步任务
before_generate_reply_handle 确定要回复后,在生成回复前的处理
after_generate_reply_handle 确定要回复后,在生成回复后的处理
not_reply_handle 确定不回复后的处理
get_reply_probability 获取回复概率
get_variable_parameters 暂不确定
set_variable_parameters 暂不确定
以下2个方法根据你的实现可以做调整
get_willing 获取某聊天流意愿
set_willing 设置某聊天流意愿
规范说明:
模块文件命名: `mode_{manager_type}.py`
示例: 若 `manager_type="aggressive"`,则模块文件应为 `mode_aggressive.py`
类命名: `{manager_type}WillingManager` (首字母大写)
示例: 在 `mode_aggressive.py` 中,类名应为 `AggressiveWillingManager`
"""
logger = get_logger("willing")
@dataclass
class WillingInfo:
"""此类保存意愿模块常用的参数
Attributes:
message (MessageRecv): 原始消息对象
chat (ChatStream): 聊天流对象
person_info_manager (PersonInfoManager): 用户信息管理对象
chat_id (str): 当前聊天流的标识符
person_id (str): 发送者的个人信息的标识符
group_id (str): 群组ID如果是私聊则为空
is_mentioned_bot (bool): 是否提及了bot
is_emoji (bool): 是否为表情包
interested_rate (float): 兴趣度
"""
message: Dict[str, Any] # 原始消息数据
chat: ChatStream
person_info_manager: PersonInfoManager
chat_id: str
person_id: str
group_info: Optional[GroupInfo]
is_mentioned_bot: bool
is_emoji: bool
is_picid: bool
interested_rate: float
# current_mood: float 当前心情?
class BaseWillingManager(ABC):
"""回复意愿管理基类"""
@classmethod
def create(cls, manager_type: str) -> "BaseWillingManager":
try:
module = importlib.import_module(f".mode_{manager_type}", __package__)
manager_class = getattr(module, f"{manager_type.capitalize()}WillingManager")
if not issubclass(manager_class, cls):
raise TypeError(f"Manager class {manager_class.__name__} is not a subclass of {cls.__name__}")
else:
logger.info(f"普通回复模式:{manager_type}")
return manager_class()
except (ImportError, AttributeError, TypeError) as e:
module = importlib.import_module(".mode_classical", __package__)
manager_class = module.ClassicalWillingManager
logger.info(f"载入当前意愿模式{manager_type}失败,使用经典配方~~~~")
logger.debug(f"加载willing模式{manager_type}失败,原因: {str(e)}")
return manager_class()
def __init__(self):
self.chat_reply_willing: Dict[str, float] = {} # 存储每个聊天流的回复意愿(chat_id)
self.ongoing_messages: Dict[str, WillingInfo] = {} # 当前正在进行的消息(message_id)
self.lock = asyncio.Lock()
self.logger = logger
def setup(self, message: dict, chat: ChatStream):
person_id = PersonInfoManager.get_person_id(chat.platform, chat.user_info.user_id) # type: ignore
self.ongoing_messages[message.get("message_id", "")] = WillingInfo(
message=message,
chat=chat,
person_info_manager=get_person_info_manager(),
chat_id=chat.stream_id,
person_id=person_id,
group_info=chat.group_info,
is_mentioned_bot=message.get("is_mentioned", False),
is_emoji=message.get("is_emoji", False),
is_picid=message.get("is_picid", False),
interested_rate=message.get("interest_value") or 0.0,
)
def delete(self, message_id: str):
del_message = self.ongoing_messages.pop(message_id, None)
if not del_message:
logger.debug(f"尝试删除不存在的消息 ID: {message_id},可能已被其他流程处理,喵~")
@abstractmethod
async def async_task_starter(self) -> None:
"""抽象方法:异步任务启动器"""
pass
@abstractmethod
async def before_generate_reply_handle(self, message_id: str):
"""抽象方法:回复前处理"""
pass
@abstractmethod
async def after_generate_reply_handle(self, message_id: str):
"""抽象方法:回复后处理"""
pass
@abstractmethod
async def not_reply_handle(self, message_id: str):
"""抽象方法:不回复处理"""
pass
@abstractmethod
async def get_reply_probability(self, message_id: str):
"""抽象方法:获取回复概率"""
raise NotImplementedError
async def get_willing(self, chat_id: str):
"""获取指定聊天流的回复意愿"""
async with self.lock:
return self.chat_reply_willing.get(chat_id, 0)
async def set_willing(self, chat_id: str, willing: float):
"""设置指定聊天流的回复意愿"""
async with self.lock:
self.chat_reply_willing[chat_id] = willing
# @abstractmethod
# async def get_variable_parameters(self) -> Dict[str, str]:
# """抽象方法:获取可变参数"""
# pass
# @abstractmethod
# async def set_variable_parameters(self, parameters: Dict[str, any]):
# """抽象方法:设置可变参数"""
# pass
def init_willing_manager() -> BaseWillingManager:
"""
根据配置初始化并返回对应的WillingManager实例
Returns:
对应mode的WillingManager实例
"""
mode = global_config.normal_chat.willing_mode.lower()
return BaseWillingManager.create(mode)
# 全局willing_manager对象
willing_manager = None
def get_willing_manager():
global willing_manager
if willing_manager is None:
willing_manager = init_willing_manager()
return willing_manager
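The deleted factory resolved implementations purely by naming convention, so willing_mode = "mxp" imported mode_mxp.py and instantiated MxpWillingManager, with any failure falling back to the classical manager:

    manager = BaseWillingManager.create("mxp")      # -> .mode_mxp / MxpWillingManager
    manager = BaseWillingManager.create("nonsense") # ImportError -> ClassicalWillingManager fallback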

View File

@@ -106,8 +106,7 @@ class ModelTaskConfig(ValidatedConfigBase):
     # 必需配置项
     utils: TaskConfig = Field(..., description="组件模型配置")
     utils_small: TaskConfig = Field(..., description="组件小模型配置")
-    replyer_1: TaskConfig = Field(..., description="normal_chat首要回复模型模型配置")
-    replyer_2: TaskConfig = Field(..., description="normal_chat次要回复模型配置")
+    replyer: TaskConfig = Field(..., description="normal_chat首要回复模型模型配置")
     maizone: TaskConfig = Field(..., description="maizone专用模型")
     emotion: TaskConfig = Field(..., description="情绪模型配置")
     vlm: TaskConfig = Field(..., description="视觉语言模型配置")
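Any model configuration keyed to the old pair needs the matching rename; downstream code now reads a single task config, and (per the DefaultReplyer diff above) replyer_random_probability no longer selects between two models. A sketch of the access change, with the field layout as defined here:

    # before: two weighted entries
    #   model_config.model_task_config.replyer_1 / model_config.model_task_config.replyer_2
    # after: one entry shared by every reply path
    task = model_config.model_task_config.replyer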

View File

@@ -258,7 +258,6 @@ class MessageReceiveConfig(ValidatedConfigBase):

 class NormalChatConfig(ValidatedConfigBase):
     """普通聊天配置类"""

-    willing_mode: str = Field(default="classical", description="意愿模式")

 class ExpressionRule(ValidatedConfigBase):

View File

@@ -8,7 +8,6 @@ from maim_message import MessageServer
 from src.manager.async_task_manager import async_task_manager
 from src.chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
 from src.chat.emoji_system.emoji_manager import get_emoji_manager
-from src.chat.willing.willing_manager import get_willing_manager
 from src.chat.message_receive.chat_stream import get_chat_manager
 from src.config.config import global_config
 from src.chat.message_receive.bot import chat_bot
@@ -38,8 +37,6 @@ if global_config.memory.enable_memory:

 install(extra_lines=3)

-willing_manager = get_willing_manager()
-
 logger = get_logger("main")
@@ -192,10 +189,6 @@ MoFox_Bot(第三方修改版)
         get_emoji_manager().initialize()
         logger.info("表情包管理器初始化成功")

-        # 启动愿望管理器
-        await willing_manager.async_task_starter()
-        logger.info("willing管理器初始化成功")
-
         # 启动情绪管理器
         await mood_manager.start()

View File

@@ -60,7 +60,7 @@ class MaiThinking:
         self.sender = ""
         self.target = ""

-        self.thinking_model = LLMRequest(model_set=model_config.model_task_config.replyer_1, request_type="thinking")
+        self.thinking_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type="thinking")

     async def do_think_before_response(self):
         pass

View File

@@ -13,8 +13,8 @@ logger = get_logger("s4u_stream_generator")

 class S4UStreamGenerator:
     def __init__(self):
-        replyer_1_config = model_config.model_task_config.replyer_1
-        model_to_use = replyer_1_config.model_list[0]
+        replyer_config = model_config.model_task_config.replyer
+        model_to_use = replyer_config.model_list[0]
         model_info = model_config.get_model_info(model_to_use)
         if not model_info:
             logger.error(f"模型 {model_to_use} 在配置中未找到")
@@ -22,8 +22,8 @@ class S4UStreamGenerator:
provider_name = model_info.api_provider provider_name = model_info.api_provider
provider_info = model_config.get_provider(provider_name) provider_info = model_config.get_provider(provider_name)
if not provider_info: if not provider_info:
logger.error("`replyer_1` 找不到对应的Provider") logger.error("`replyer` 找不到对应的Provider")
raise ValueError("`replyer_1` 找不到对应的Provider") raise ValueError("`replyer` 找不到对应的Provider")
api_key = provider_info.api_key api_key = provider_info.api_key
base_url = provider_info.base_url base_url = provider_info.base_url
@@ -34,7 +34,7 @@ class S4UStreamGenerator:
self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url) self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url)
self.model_1_name = model_to_use self.model_1_name = model_to_use
self.replyer_1_config = replyer_1_config self.replyer_config = replyer_config
self.current_model_name = "unknown model" self.current_model_name = "unknown model"
self.partial_response = "" self.partial_response = ""
@@ -104,10 +104,10 @@ class S4UStreamGenerator:
self.current_model_name = self.model_1_name self.current_model_name = self.model_1_name
extra_kwargs = {} extra_kwargs = {}
if self.replyer_1_config.get("enable_thinking") is not None: if self.replyer_config.get("enable_thinking") is not None:
extra_kwargs["enable_thinking"] = self.replyer_1_config.get("enable_thinking") extra_kwargs["enable_thinking"] = self.replyer_config.get("enable_thinking")
if self.replyer_1_config.get("thinking_budget") is not None: if self.replyer_config.get("thinking_budget") is not None:
extra_kwargs["thinking_budget"] = self.replyer_1_config.get("thinking_budget") extra_kwargs["thinking_budget"] = self.replyer_config.get("thinking_budget")
async for chunk in self._generate_response_with_model( async for chunk in self._generate_response_with_model(
prompt, current_client, self.current_model_name, **extra_kwargs prompt, current_client, self.current_model_name, **extra_kwargs
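
The last sub-hunk forwards optional model settings only when they are actually set. The same pattern in isolation, as a standalone sketch (key names taken from the hunk, the function name is illustrative):

from typing import Any, Dict


def build_extra_kwargs(config: Dict[str, Any]) -> Dict[str, Any]:
    # Forward optional settings only when explicitly set, so the
    # downstream client never receives spurious None values.
    extra: Dict[str, Any] = {}
    for key in ("enable_thinking", "thinking_budget"):
        if config.get(key) is not None:
            extra[key] = config[key]
    return extra


print(build_extra_kwargs({"enable_thinking": True, "thinking_budget": None}))
# {'enable_thinking': True}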

View File

@@ -32,7 +32,6 @@ logger = get_logger("generator_api")
 def get_replyer(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "replyer",
 ) -> Optional[DefaultReplyer]:
     """Get a replyer object

@@ -43,7 +42,6 @@ def get_replyer(
     Args:
         chat_stream: chat stream object (takes precedence)
         chat_id: chat ID (in effect the stream_id)
-        model_set_with_weight: list of model configs; each element is a (TaskConfig, weight) tuple
         request_type: request type

     Returns:

@@ -59,7 +57,6 @@ def get_replyer(
         return replyer_manager.get_replyer(
             chat_stream=chat_stream,
             chat_id=chat_id,
-            model_set_with_weight=model_set_with_weight,
             request_type=request_type,
         )
     except Exception as e:

@@ -78,13 +75,13 @@ async def generate_reply(
     chat_id: Optional[str] = None,
     action_data: Optional[Dict[str, Any]] = None,
     reply_to: str = "",
+    reply_message: Optional[Dict[str, Any]] = None,
     extra_info: str = "",
     available_actions: Optional[Dict[str, ActionInfo]] = None,
     enable_tool: bool = False,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
     return_prompt: bool = False,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "generator_api",
     from_plugin: bool = True,
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:

@@ -95,6 +92,7 @@ async def generate_reply(
         chat_id: chat ID (fallback)
         action_data: action data (backward compatible; contains reply_to and extra_info)
         reply_to: reply target, in the form "sender: message content"
+        reply_message: the original message being replied to
         extra_info: extra information used to supplement the context
         available_actions: available actions
         enable_tool: whether to enable tool calls

@@ -110,7 +108,7 @@ async def generate_reply(
     try:
         # Get the replyer
-        replyer = get_replyer(
-            chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type
-        )
+        replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
         if not replyer:
             logger.error("[GeneratorAPI] Failed to get a replyer")

@@ -131,6 +129,7 @@ async def generate_reply(
             enable_tool=enable_tool,
             from_plugin=from_plugin,
             stream_id=chat_stream.stream_id if chat_stream else chat_id,
+            reply_message=reply_message,
         )

         if not success:
             logger.warning("[GeneratorAPI] Reply generation failed")

@@ -166,11 +165,11 @@ async def rewrite_reply(
     chat_id: Optional[str] = None,
     enable_splitter: bool = True,
     enable_chinese_typo: bool = True,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     raw_reply: str = "",
     reason: str = "",
     reply_to: str = "",
     return_prompt: bool = False,
+    request_type: str = "generator_api",
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
     """Rewrite a reply

@@ -180,7 +179,6 @@ async def rewrite_reply(
         chat_id: chat ID (fallback)
         enable_splitter: whether to enable the message splitter
         enable_chinese_typo: whether to enable the typo generator
-        model_set_with_weight: list of model configs; each element is a (TaskConfig, weight) tuple
         raw_reply: raw reply content
         reason: reply reason
         reply_to: reply target

@@ -191,7 +189,7 @@ async def rewrite_reply(
     """
     try:
         # Get the replyer
-        replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight)
+        replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
         if not replyer:
             logger.error("[GeneratorAPI] Failed to get a replyer")
             return False, [], None

@@ -258,10 +256,10 @@ def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo:
 async def generate_response_custom(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
-    model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
+    request_type: str = "generator_api",
     prompt: str = "",
 ) -> Optional[str]:
-    replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight)
+    replyer = get_replyer(chat_stream, chat_id, request_type=request_type)
     if not replyer:
         logger.error("[GeneratorAPI] Failed to get a replyer")
         return None
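
A usage sketch for the updated generate_reply surface; the shape of the incoming message dict is assumed, not specified by this commit:

from src.plugin_system.apis import generator_api


async def demo(chat_id: str, incoming_message: dict) -> None:
    # reply_message carries the raw message being replied to; per-call model
    # weighting is gone, so model choice comes from model_task_config.replyer.
    success, reply_set, _prompt = await generator_api.generate_reply(
        chat_id=chat_id,
        reply_message=incoming_message,
        extra_info="answer briefly",
    )
    if success:
        for seg_type, content in reply_set:
            print(seg_type, content)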

View File

@@ -32,7 +32,7 @@ import traceback
 import time
 import difflib
 import asyncio
-from typing import Optional, Union, Dict
+from typing import Optional, Union, Dict, Any
 from src.common.logger import get_logger

 # Import dependencies

@@ -88,7 +88,8 @@ async def _send_to_target(
     display_message: str = "",
     typing: bool = False,
     reply_to: str = "",
-    reply_to_platform_id: Optional[str] = None,
+    set_reply: bool = False,
+    reply_to_message: Optional[Dict[str, Any]] = None,
     storage_message: bool = True,
     show_log: bool = True,
 ) -> bool:

@@ -101,7 +102,6 @@ async def _send_to_target(
         display_message: display message
         typing: whether to simulate a typing delay
         reply_to: message to reply to, in the form "sender: message content"
-        reply_to_platform_id: reply target in the form "platform:user ID"; looked up automatically if omitted (do not use in plugins!)
         storage_message: whether to store the message in the database
         show_log: whether to log the send

@@ -109,6 +109,9 @@ async def _send_to_target(
         bool: whether the send succeeded
     """
     try:
+        if reply_to:
+            logger.warning("[SendAPI] As of 0.10.0 the reply_to parameter is deprecated; use reply_to_message instead")
+
         if show_log:
             logger.debug(f"[SendAPI] Sending {message_type} message to {stream_id}")

@@ -135,14 +138,14 @@ async def _send_to_target(
         # Create the message segment
         message_segment = Seg(type=message_type, data=content)  # type: ignore

-        # Handle the reply message
-        anchor_message = None
-        if reply_to:
-            anchor_message = await _find_reply_message(target_stream, reply_to)
-            if anchor_message and anchor_message.message_info.user_info and not reply_to_platform_id:
-                reply_to_platform_id = (
-                    f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
-                )
+        if reply_to_message:
+            anchor_message = MessageRecv(message_dict=reply_to_message)
+            anchor_message.update_chat_stream(target_stream)
+            reply_to_platform_id = (
+                f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
+            )
+        else:
+            anchor_message = None

         # Build the outgoing message object
         bot_message = MessageSending(

@@ -163,7 +166,7 @@ async def _send_to_target(
         sent_msg = await heart_fc_sender.send_message(
             bot_message,
             typing=typing,
-            set_reply=(anchor_message is not None),
+            set_reply=set_reply,
             storage_message=storage_message,
             show_log=show_log,
         )

@@ -298,7 +301,8 @@ async def text_to_stream(
     stream_id: str,
     typing: bool = False,
     reply_to: str = "",
-    reply_to_platform_id: str = "",
+    reply_to_message: Optional[Dict[str, Any]] = None,
+    set_reply: bool = False,
     storage_message: bool = True,
 ) -> bool:
     """Send a text message to the given stream

@@ -308,7 +312,6 @@ async def text_to_stream(
         stream_id: chat stream ID
         typing: whether to show the typing indicator
         reply_to: message to reply to, in the form "sender: message content"
-        reply_to_platform_id: reply target in the form "platform:user ID"; looked up automatically if omitted (do not use in plugins!)
         storage_message: whether to store the message in the database

     Returns:

@@ -321,12 +324,13 @@ async def text_to_stream(
         "",
         typing,
         reply_to,
-        reply_to_platform_id=reply_to_platform_id,
+        set_reply=set_reply,
+        reply_to_message=reply_to_message,
         storage_message=storage_message,
     )


-async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True) -> bool:
+async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
     """Send an emoji to the given stream

     Args:

@@ -337,10 +341,10 @@ async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bo
     Returns:
         bool: whether the send succeeded
     """
-    return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message)
+    return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)


-async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True) -> bool:
+async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
     """Send an image to the given stream

     Args:

@@ -351,11 +355,11 @@ async def image_to_stream(image_base64: str, stream_id: str, storage_message: bo
     Returns:
         bool: whether the send succeeded
     """
-    return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message)
+    return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)


 async def command_to_stream(
-    command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = ""
+    command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = "", set_reply: bool = False
 ) -> bool:
     """Send a command to the given stream

@@ -379,6 +383,8 @@ async def custom_to_stream(
     display_message: str = "",
     typing: bool = False,
     reply_to: str = "",
+    reply_to_message: Optional[Dict[str, Any]] = None,
+    set_reply: bool = False,
     storage_message: bool = True,
     show_log: bool = True,
 ) -> bool:

@@ -403,6 +409,8 @@ async def custom_to_stream(
         display_message=display_message,
         typing=typing,
         reply_to=reply_to,
+        reply_to_message=reply_to_message,
+        set_reply=set_reply,
         storage_message=storage_message,
         show_log=show_log,
     )
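
A usage sketch for the new send_api surface; source_message is assumed to be the stored dict form of the message being replied to:

from src.plugin_system.apis import send_api


async def reply_with_quote(stream_id: str, source_message: dict) -> bool:
    # reply_to_message supersedes the deprecated reply_to string, and
    # set_reply must now be passed explicitly instead of being inferred
    # from the presence of an anchor message.
    return await send_api.text_to_stream(
        "Got it!",
        stream_id,
        typing=True,
        reply_to_message=source_message,
        set_reply=True,
    )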

View File

@@ -48,7 +48,7 @@ class ContentService:
         try:
             # Get the model config
             models = llm_api.get_available_models()
-            text_model = str(self.get_config("models.text_model", "replyer_1"))
+            text_model = str(self.get_config("models.text_model", "replyer"))
             model_config = models.get(text_model)

             if not model_config:

@@ -278,7 +278,7 @@ class ContentService:
         try:
             # Get the model config
             models = llm_api.get_available_models()
-            text_model = str(self.get_config("models.text_model", "replyer_1"))
+            text_model = str(self.get_config("models.text_model", "replyer"))
             model_config = models.get(text_model)

             if not model_config:

View File

@@ -1,5 +1,5 @@
 [inner]
-version = "1.2.9"
+version = "1.3.0"

 # Config file version number; iteration rules are the same as for bot_config.toml

@@ -132,16 +132,11 @@ model_list = ["qwen3-8b"]
 temperature = 0.7
 max_tokens = 800

-[model_task_config.replyer_1] # Primary reply model; also used by the expressor and for expression-style learning
+[model_task_config.replyer] # Primary reply model; also used by the expressor and for expression-style learning
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.2 # Model temperature; 0.1-0.3 recommended for the new V3
 max_tokens = 800

-[model_task_config.replyer_2] # Secondary reply model
-model_list = ["siliconflow-deepseek-v3"]
-temperature = 0.7
-max_tokens = 800
-
 [model_task_config.planner] # Decision-making: the model that decides what 麦麦 should do
 model_list = ["siliconflow-deepseek-v3"]
 temperature = 0.3
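
Existing configs need the same rename; a hypothetical migration helper, not part of this commit, operating on a parsed config dict:

from typing import Any, Dict


def migrate_model_task_config(cfg: Dict[str, Any]) -> Dict[str, Any]:
    # Rename replyer_1 to replyer and drop replyer_2, mirroring this commit.
    tasks = cfg.setdefault("model_task_config", {})
    if "replyer" not in tasks and "replyer_1" in tasks:
        tasks["replyer"] = tasks.pop("replyer_1")
    tasks.pop("replyer_1", None)
    tasks.pop("replyer_2", None)  # the secondary reply model no longer exists
    return cfg


old = {"model_task_config": {"replyer_1": {"model_list": ["siliconflow-deepseek-v3"]}}}
print(list(migrate_model_task_config(old)["model_task_config"]))  # ['replyer']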