Migration: 1f91967 (remove: remove the willing system, remove reply2, remove the energy value, replace reply_to with message)

Windpicker-owo
2025-09-03 21:27:28 +08:00
parent 57ba493a74
commit 51aad4f952
16 changed files with 151 additions and 314 deletions
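
The API change running through these files: callers stop passing a pre-formatted "sender:text" reply_to string and pass the original message dict instead. A hedged before/after sketch of the call convention (names follow the diffs below; the exact call sites are illustrative):

# Before: the reply target was serialized into a "sender:text" string
reply_to_str = await self._build_reply_to_str(action_message)
response_set = await self.response_handler.generate_response(
    message_data=action_message,
    reply_to=reply_to_str,
    request_type="chat.replyer",
)

# After: the raw message dict travels with the request
success, response_set, _ = await generator_api.generate_reply(
    chat_stream=self.context.chat_stream,
    reply_message=action_message,
    request_type="chat.replyer",
)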

View File

@@ -1,6 +1,8 @@
import asyncio
import time
import traceback
import math
import random
from typing import Optional, Dict, Any, Tuple
from src.chat.message_receive.chat_stream import get_chat_manager
@@ -10,7 +12,7 @@ from src.config.config import global_config
from src.chat.planner_actions.planner import ActionPlanner
from src.chat.planner_actions.action_modifier import ActionModifier
from src.person_info.person_info import get_person_info_manager
from src.plugin_system.apis import database_api
from src.plugin_system.apis import database_api, generator_api
from src.plugin_system.base.component_types import ChatMode
from src.mais4u.constant_s4u import ENABLE_S4U
from src.chat.chat_loop.hfc_utils import send_typing, stop_typing
@@ -44,7 +46,6 @@ class CycleProcessor:
async def _send_and_store_reply(
self,
response_set,
reply_to_str,
loop_start_time,
action_message,
cycle_timers: Dict[str, float],
@@ -52,7 +53,7 @@ class CycleProcessor:
actions,
) -> Tuple[Dict[str, Any], str, Dict[str, float]]:
with Timer("回复发送", cycle_timers):
reply_text = await self.response_handler.send_response(response_set, reply_to_str, loop_start_time, action_message)
reply_text = await self.response_handler.send_response(response_set, loop_start_time, action_message)
# 存储reply action信息
person_info_manager = get_person_info_manager()
@@ -60,7 +61,7 @@ class CycleProcessor:
# 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
platform = action_message.get("chat_info_platform")
if platform is None:
platform = getattr(self.chat_stream, "platform", "unknown")
platform = getattr(self.context.chat_stream, "platform", "unknown")
person_id = person_info_manager.get_person_id(
platform,
@@ -75,7 +76,7 @@ class CycleProcessor:
action_prompt_display=action_prompt_display,
action_done=True,
thinking_id=thinking_id,
action_data={"reply_text": reply_text, "reply_to": reply_to_str},
action_data={"reply_text": reply_text},
action_name="reply",
)
@@ -113,12 +114,6 @@ class CycleProcessor:
"""
action_type = "no_action"
reply_text = "" # 初始化reply_text变量避免UnboundLocalError
reply_to_str = "" # 初始化reply_to_str变量
# 根据interest_value计算概率决定使用哪种planner模式
# interest_value越高越倾向于使用Normal模式
import random
import math
# 使用sigmoid函数将interest_value转换为概率
# 当interest_value为0时,概率接近0,使用Focus模式
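
The two comments above describe the planner-mode choice: interest_value is squashed through a sigmoid into a probability of using Normal mode. A hedged sketch of that mapping (the steepness and midpoint constants are assumptions, not values from this diff):

import math
import random

def pick_planner_mode(interest_value: float, steepness: float = 1.0, midpoint: float = 2.0) -> str:
    # sigmoid maps interest into (0, 1); at interest_value == 0 the probability
    # is near 0, so Focus mode is chosen almost every time
    p_normal = 1.0 / (1.0 + math.exp(-steepness * (interest_value - midpoint)))
    return "normal" if random.random() < p_normal else "focus"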
@@ -224,40 +219,23 @@ class CycleProcessor:
"command": command
}
else:
# 执行回复动作
try:
reply_to_str = await self._build_reply_to_str(action_info["action_message"])
except UserWarning:
logger.warning("选取了自己作为回复对象,跳过回复生成")
return {
"action_type": "reply",
"success": False,
"reply_text": "",
"loop_info": None
}
# 生成回复
gather_timeout = global_config.chat.thinking_timeout
try:
response_set = await asyncio.wait_for(
self.response_handler.generate_response(
message_data=action_info["action_message"],
available_actions=action_info["available_actions"],
reply_to=reply_to_str,
request_type="chat.replyer",
),
timeout=gather_timeout
success, response_set, _ = await generator_api.generate_reply(
chat_stream=self.context.chat_stream,
reply_message=action_info["action_message"],
available_actions=action_info["available_actions"],
enable_tool=global_config.tool.enable_tool,
request_type="chat.replyer",
from_plugin=False,
)
except asyncio.TimeoutError:
logger.warning(
f"{self.log_prefix} 并行执行:回复生成超时>{global_config.chat.thinking_timeout}s已跳过"
)
return {
"action_type": "reply",
"success": False,
"reply_text": "",
"loop_info": None
}
if not success or not response_set:
logger.info(f"{action_info['action_message'].get('processed_plain_text')} 的回复生成失败")
return {
"action_type": "reply",
"success": False,
"reply_text": "",
"loop_info": None
}
except asyncio.CancelledError:
logger.debug(f"{self.log_prefix} 并行执行:回复生成任务已被取消")
return {
@@ -267,18 +245,8 @@ class CycleProcessor:
"loop_info": None
}
if not response_set:
logger.warning(f"{self.log_prefix} 模型超时或生成回复内容为空")
return {
"action_type": "reply",
"success": False,
"reply_text": "",
"loop_info": None
}
loop_info, reply_text, cycle_timers_reply = await self._send_and_store_reply(
response_set,
reply_to_str,
loop_start_time,
action_info["action_message"],
cycle_timers,
@@ -303,7 +271,6 @@ class CycleProcessor:
}
# 创建所有动作的后台任务
action_tasks = [asyncio.create_task(execute_action(action)) for action in actions]
# 并行执行所有任务
@@ -367,7 +334,6 @@ class CycleProcessor:
self.context.chat_instance.cycle_tracker.end_cycle(loop_info, cycle_timers)
self.context.chat_instance.cycle_tracker.print_cycle_info(cycle_timers)
# await self.willing_manager.after_generate_reply_handle(message_data.get("message_id", ""))
action_type = actions[0]["action_type"] if actions else "no_action"
# 管理no_reply计数器:当执行了非no_reply动作时重置计数器
if action_type != "no_reply":
@@ -688,36 +654,6 @@ class CycleProcessor:
"action_prompt": "",
}
async def _build_reply_to_str(self, message_data: dict):
"""
构建回复目标字符串
Args:
message_data: 消息数据字典
Returns:
str: 格式化的回复目标字符串,格式为"用户名:消息内容"
功能说明:
- 从消息数据中提取平台和用户ID信息
- 通过人员信息管理器获取用户昵称
- 构建用于回复显示的格式化字符串
"""
from src.person_info.person_info import get_person_info_manager
person_info_manager = get_person_info_manager()
platform = (
message_data.get("chat_info_platform")
or message_data.get("user_platform")
or (self.context.chat_stream.platform if self.context.chat_stream else "default")
)
user_id = message_data.get("user_id", "")
if user_id == str(global_config.bot.qq_account) and platform == global_config.bot.platform:
raise UserWarning
person_id = person_info_manager.get_person_id(platform, user_id)
person_name = await person_info_manager.get_value(person_id, "person_name")
return f"{person_name}:{message_data.get('processed_plain_text')}"
def _build_final_loop_info(self, reply_loop_info, action_success, action_reply_text, action_command, plan_result):
"""
构建最终的循环信息

View File

@@ -57,26 +57,6 @@ class EnergyManager:
await asyncio.sleep(0)
logger.info(f"{self.context.log_prefix} 能量管理器已停止")
def _handle_energy_completion(self, task: asyncio.Task):
"""
处理能量循环任务完成
Args:
task: 完成的异步任务对象
功能说明:
- 处理任务正常完成或异常情况
- 记录相应的日志信息
- 区分取消和异常终止的情况
"""
try:
if exception := task.exception():
logger.error(f"{self.context.log_prefix} 能量循环异常: {exception}")
else:
logger.info(f"{self.context.log_prefix} 能量循环正常结束")
except asyncio.CancelledError:
logger.info(f"{self.context.log_prefix} 能量循环被取消")
async def _energy_loop(self):
"""
能量与睡眠压力管理的主循环

View File

@@ -12,14 +12,12 @@ from src.chat.express.expression_learner import expression_learner_manager
from src.plugin_system.base.component_types import ChatMode
from src.schedule.schedule_manager import schedule_manager, SleepState
from src.plugin_system.apis import message_api
from src.chat.willing.willing_manager import get_willing_manager
from .hfc_context import HfcContext
from .energy_manager import EnergyManager
from .proactive_thinker import ProactiveThinker
from .cycle_processor import CycleProcessor
from .response_handler import ResponseHandler
from .normal_mode_handler import NormalModeHandler
from .cycle_tracker import CycleTracker
from .wakeup_manager import WakeUpManager
@@ -47,7 +45,6 @@ class HeartFChatting:
self.cycle_processor = CycleProcessor(self.context, self.response_handler, self.cycle_tracker)
self.energy_manager = EnergyManager(self.context)
self.proactive_thinker = ProactiveThinker(self.context, self.cycle_processor)
self.normal_mode_handler = NormalModeHandler(self.context, self.cycle_processor)
self.wakeup_manager = WakeUpManager(self.context)
# 将唤醒度管理器设置到上下文中
@@ -60,7 +57,6 @@ class HeartFChatting:
# 记录最近3次的兴趣度
self.recent_interest_records: deque = deque(maxlen=3)
self.willing_manager = get_willing_manager()
self._initialize_chat_mode()
logger.info(f"{self.context.log_prefix} HeartFChatting 初始化完成")
@@ -97,7 +93,7 @@ class HeartFChatting:
self.context.relationship_builder = relationship_builder_manager.get_or_create_builder(self.context.stream_id)
self.context.expression_learner = expression_learner_manager.get_expression_learner(self.context.stream_id)
await self.energy_manager.start()
#await self.energy_manager.start()
await self.proactive_thinker.start()
await self.wakeup_manager.start()
@@ -120,7 +116,7 @@ class HeartFChatting:
return
self.context.running = False
await self.energy_manager.stop()
#await self.energy_manager.stop()
await self.proactive_thinker.stop()
await self.wakeup_manager.stop()
@@ -245,8 +241,6 @@ class HeartFChatting:
# 统一使用 _should_process_messages 判断是否应该处理
should_process, interest_value = await self._should_process_messages(recent_messages if has_new_messages else None)
if should_process:
#earliest_message_data = recent_messages[0]
#self.last_read_time = earliest_message_data.get("time")
self.context.last_read_time = time.time()
await self.cycle_processor.observe(interest_value=interest_value)
else:
@@ -418,6 +412,7 @@ class HeartFChatting:
# talk_frequency = global_config.chat.get_current_talk_frequency(self.context.chat_stream.stream_id)
modified_exit_count_threshold = self.context.focus_energy / global_config.chat.focus_value
modified_exit_interest_threshold = 3 / global_config.chat.focus_value
total_interest = 0.0
for msg_dict in new_message:
interest_value = msg_dict.get("interest_value", 0.0)
@@ -441,11 +436,11 @@ class HeartFChatting:
if not hasattr(self, "_last_accumulated_interest") or total_interest != self._last_accumulated_interest:
logger.info(f"{self.context.log_prefix} breaking形式当前累计兴趣值: {total_interest:.2f}, 专注度: {global_config.chat.focus_value:.1f}")
self._last_accumulated_interest = total_interest
if total_interest >= 3 / global_config.chat.focus_value:
if total_interest >= modified_exit_interest_threshold:
# 记录兴趣度到列表
self.recent_interest_records.append(total_interest)
logger.info(
f"{self.context.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{3 / global_config.chat.focus_value}),结束等待"
f"{self.context.log_prefix} 累计兴趣值达到{total_interest:.2f}(>{modified_exit_interest_threshold:.1f}),结束等待"
)
return True, total_interest / new_message_count
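
The refactor above only names the existing constant: the exit threshold is 3 scaled by focus_value. A worked example with an illustrative focus_value:

focus_value = 1.5                                   # global_config.chat.focus_value (example value)
modified_exit_interest_threshold = 3 / focus_value  # == 2.0
# higher focus_value -> lower threshold, so accumulated interest ends the wait sooner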

View File

@@ -1,84 +0,0 @@
import random
from typing import Dict, Any, TYPE_CHECKING
from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.willing.willing_manager import get_willing_manager
from .hfc_context import HfcContext
if TYPE_CHECKING:
from .cycle_processor import CycleProcessor
logger = get_logger("hfc.normal_mode")
class NormalModeHandler:
def __init__(self, context: HfcContext, cycle_processor: "CycleProcessor"):
"""
初始化普通模式处理器
Args:
context: HFC聊天上下文对象
cycle_processor: 循环处理器,用于处理决定回复的消息
功能说明:
- 处理NORMAL模式下的消息
- 根据兴趣度和回复概率决定是否回复
- 管理意愿系统和回复概率计算
"""
self.context = context
self.cycle_processor = cycle_processor
self.willing_manager = get_willing_manager()
async def handle_message(self, message_data: Dict[str, Any]) -> bool:
"""
处理NORMAL模式下的单条消息
Args:
message_data: 消息数据字典,包含用户信息、消息内容、兴趣值等
Returns:
bool: 是否进行了回复处理
功能说明:
- 计算消息的兴趣度和基础回复概率
- 应用谈话频率调整回复概率
- 过滤表情和图片消息设置回复概率为0
- 根据概率随机决定是否回复
- 如果决定回复则调用循环处理器进行处理
- 记录详细的决策日志
"""
if not self.context.chat_stream:
return False
interested_rate = message_data.get("interest_value") or 0.0
self.willing_manager.setup(message_data, self.context.chat_stream)
reply_probability = await self.willing_manager.get_reply_probability(message_data.get("message_id", ""))
if reply_probability < 1:
additional_config = message_data.get("additional_config", {})
if additional_config and "maimcore_reply_probability_gain" in additional_config:
reply_probability += additional_config["maimcore_reply_probability_gain"]
reply_probability = min(max(reply_probability, 0), 1)
talk_frequency = global_config.chat.get_current_talk_frequency(self.context.stream_id)
reply_probability = talk_frequency * reply_probability
if message_data.get("is_emoji") or message_data.get("is_picid"):
reply_probability = 0
mes_name = self.context.chat_stream.group_info.group_name if self.context.chat_stream.group_info else "私聊"
if reply_probability > 0.05:
logger.info(
f"[{mes_name}]"
f"{message_data.get('user_nickname')}:"
f"{message_data.get('processed_plain_text')}[兴趣:{interested_rate:.2f}][回复概率:{reply_probability * 100:.1f}%]"
)
if random.random() < reply_probability:
await self.willing_manager.before_generate_reply_handle(message_data.get("message_id", ""))
await self.cycle_processor.observe(message_data=message_data)
return True
self.willing_manager.delete(message_data.get("message_id", ""))
return False

View File

@@ -105,7 +105,7 @@ class ResponseHandler:
return loop_info, reply_text, cycle_timers
async def send_response(self, reply_set, reply_to, thinking_start_time, message_data) -> str:
async def send_response(self, reply_set, thinking_start_time, message_data) -> str:
"""
发送回复内容的具体实现
@@ -129,9 +129,6 @@ class ResponseHandler:
new_message_count = message_api.count_new_messages(
chat_id=self.context.stream_id, start_time=thinking_start_time, end_time=current_time
)
platform = message_data.get("user_platform", "")
user_id = message_data.get("user_id", "")
reply_to_platform_id = f"{platform}:{user_id}"
need_reply = new_message_count >= random.randint(2, 4)
@@ -157,32 +154,26 @@ class ResponseHandler:
continue
if not first_replied:
if need_reply:
await send_api.text_to_stream(
text=data,
stream_id=self.context.stream_id,
reply_to=reply_to,
reply_to_platform_id=reply_to_platform_id,
typing=False,
)
else:
await send_api.text_to_stream(
text=data,
stream_id=self.context.stream_id,
reply_to_platform_id=reply_to_platform_id,
typing=False,
)
await send_api.text_to_stream(
text=data,
stream_id=self.context.stream_id,
reply_to_message=message_data,
set_reply=need_reply,
typing=False,
)
first_replied = True
else:
await send_api.text_to_stream(
text=data,
stream_id=self.context.stream_id,
reply_to_platform_id=reply_to_platform_id,
reply_to_message=message_data,
set_reply=need_reply,
typing=True,
)
return reply_text
# TODO: 已废弃
async def generate_response(
self,
message_data: dict,

View File

@@ -61,7 +61,7 @@ def init_prompt() -> None:
class ExpressionLearner:
def __init__(self, chat_id: str) -> None:
self.express_learn_model: LLMRequest = LLMRequest(
model_set=model_config.model_task_config.replyer, request_type="expression.learner"
model_set=model_config.model_task_config.replyer, request_type="expressor.learner"
)
self.chat_id = chat_id
self.chat_name = get_chat_manager().get_stream_name(chat_id) or chat_id

View File

@@ -51,13 +51,13 @@ def init_prompt():
动作reply
动作描述:参与聊天回复,发送文本进行表达
- 你想要闲聊或者随便附和
- 有人提到了你,但是你还没有回应
- 你想要闲聊或者随便附和
- {mentioned_bonus}
- 如果你刚刚进行了回复,不要对同一个话题重复回应
- 不要回复自己发送的消息
{{
"action": "reply",
"target_message_id":"想要回复的消息id",
"target_message_id":"触发action的消息id",
"reason":"回复的原因"
}}
@@ -316,7 +316,7 @@ class ActionPlanner:
if key not in ["action", "reason"]:
action_data[key] = value
# 在FOCUS模式下非no_reply动作需要target_message_id
# 非no_reply动作需要target_message_id
if action != "no_reply":
if target_message_id := parsed_json.get("target_message_id"):
# 根据target_message_id查找原始消息
@@ -341,9 +341,7 @@ class ActionPlanner:
logger.warning(f"{self.log_prefix}动作'{action}'缺少target_message_id")
if action == "no_action":
reasoning = "normal决定不使用额外动作"
elif action != "no_reply" and action != "reply" and action not in current_available_actions:
if action != "no_reply" and action != "reply" and action not in current_available_actions:
logger.warning(
f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
)
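
With the FOCUS-only comment gone, every action except no_reply must now carry a target_message_id. A simplified sketch of the validation path above (the JSON literal is illustrative):

parsed_json = {"action": "reply", "target_message_id": "m123", "reason": "回复的原因"}

action = parsed_json.get("action", "no_reply")
if action != "no_reply" and not parsed_json.get("target_message_id"):
    logger.warning(f"动作'{action}'缺少target_message_id")  # mirrors the warning above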
@@ -483,32 +481,15 @@ class ActionPlanner:
mentioned_bonus = "\n- 有人提到你或者at你"
if mode == ChatMode.FOCUS:
no_action_block = f"""重要说明:
- 'no_reply' 表示只进行不进行回复,等待合适的回复时机
no_action_block = """重要说明:
- 'no_reply' 表示不进行回复,等待合适的回复时机
- 当你刚刚发送了消息,没有人回复时,选择no_reply
- 当你一次发送了太多消息,为了避免打扰聊天节奏,选择no_reply
动作reply
动作描述:参与聊天回复,发送文本进行表达
- 你想要闲聊或者随便附和{mentioned_bonus}
- 如果你刚刚进行了回复,不要对同一个话题重复回应
- 不要回复自己发送的消息
{{
"action": "no_reply",
"reason":"不回复的原因"
}}
"""
else: # NORMAL Mode
no_action_block = f"""重要说明:
no_action_block = """重要说明:
- 'reply' 表示只进行普通聊天回复,不执行任何额外动作
- 其他action表示在普通回复的基础上执行相应的额外动作
动作reply
动作描述:参与聊天回复,发送文本进行表达
- 你想要闲聊或者随便附和
- {mentioned_bonus}
- 如果你刚刚进行了回复,不要对同一个话题重复回应
- 不要回复自己发送的消息
{{
"action": "reply",
"target_message_id":"触发action的消息id",
@@ -516,6 +497,7 @@ class ActionPlanner:
}}"""
chat_context_description = "你现在正在一个群聊中"
chat_target_name = None
if not is_group_chat and chat_target_info:
chat_target_name = (
chat_target_info.get("person_name")
@@ -546,6 +528,7 @@ class ActionPlanner:
chat_context_description=chat_context_description,
chat_content_block=chat_content_block,
actions_before_now_block=actions_before_now_block,
mentioned_bonus=mentioned_bonus,
no_action_block=no_action_block,
mentioned_bonus=mentioned_bonus,
action_options_text=action_options_block,

View File

@@ -240,14 +240,6 @@ class DefaultReplyer:
self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id)
def _select_weighted_models_config(self) -> Tuple[TaskConfig, float]:
"""使用加权随机选择来挑选一个模型配置"""
configs = self.model_set
# 提取权重,如果模型配置中没有'weight'键则默认为1.0
weights = [weight for _, weight in configs]
return random.choices(population=configs, weights=weights, k=1)[0]
async def generate_reply_with_context(
self,
extra_info: str = "",
@@ -258,7 +250,7 @@ class DefaultReplyer:
from_plugin: bool = True,
stream_id: Optional[str] = None,
reply_message: Optional[Dict[str, Any]] = None,
) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str], List[Dict[str, Any]]]:
) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str]]:
# sourcery skip: merge-nested-ifs
"""
回复器 (Replier): 负责生成回复文本的核心逻辑。
@@ -290,7 +282,6 @@ class DefaultReplyer:
choosen_actions=choosen_actions,
enable_tool=enable_tool,
reply_message=reply_message,
reply_reason=reply_reason,
)
if not prompt:
@@ -579,7 +570,6 @@ class DefaultReplyer:
if not enable_tool:
return ""
try:
# 使用工具执行器获取信息
tool_results, _, _ = await self.tool_executor.execute_from_chat_message(
@@ -826,7 +816,7 @@ class DefaultReplyer:
choosen_actions: Optional[List[Dict[str, Any]]] = None,
enable_tool: bool = True,
reply_message: Optional[Dict[str, Any]] = None,
) -> Tuple[str, List[int]]:
) -> str:
"""
构建回复器上下文
@@ -838,6 +828,7 @@ class DefaultReplyer:
enable_timeout: 是否启用超时处理
enable_tool: 是否启用工具调用
reply_message: 回复的原始消息
Returns:
str: 构建好的上下文
"""
@@ -871,6 +862,28 @@ class DefaultReplyer:
else:
mood_prompt = ""
if reply_to:
# 兼容旧的reply_to
sender, target = self._parse_reply_target(reply_to)
else:
# 获取 platform,如果不存在则从 chat_stream 获取,如果还是 None 则使用默认值
platform = reply_message.get("chat_info_platform")
person_id = person_info_manager.get_person_id(
platform, # type: ignore
reply_message.get("user_id"), # type: ignore
)
person_name = await person_info_manager.get_value(person_id, "person_name")
sender = person_name
target = reply_message.get('processed_plain_text')
person_info_manager = get_person_info_manager()
person_id = person_info_manager.get_person_id_by_person_name(sender)
user_id = person_info_manager.get_value_sync(person_id, "user_id")
platform = chat_stream.platform
if user_id == global_config.bot.qq_account and platform == global_config.bot.platform:
logger.warning("选取了自身作为回复对象跳过构建prompt")
return ""
target = replace_user_references_sync(target, chat_stream.platform, replace_bot_name=True)
@@ -905,11 +918,11 @@ class DefaultReplyer:
self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
),
self._time_and_run_task(self.build_relation_info(sender, target), "relation_info"),
self._time_and_run_task(self.build_memory_block(message_list_before_short, target), "memory_block"),
self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
self._time_and_run_task(
self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
),
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, reply_to), "prompt_info"),
self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
self._time_and_run_task(
PromptUtils.build_cross_context(chat_id, target_user_info, global_config.personality.prompt_mode),
"cross_context",
@@ -1034,14 +1047,14 @@ class DefaultReplyer:
reason: str,
reply_to: str,
reply_message: Optional[Dict[str, Any]] = None,
) -> Tuple[str, List[int]]: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
) -> str: # sourcery skip: merge-else-if-into-elif, remove-redundant-if
chat_stream = self.chat_stream
chat_id = chat_stream.stream_id
is_group_chat = bool(chat_stream.group_info)
if reply_message:
sender = reply_message.get("sender", "")
target = reply_message.get("target", "")
sender = reply_message.get("sender")
target = reply_message.get("target")
else:
sender, target = self._parse_reply_target(reply_to)
@@ -1206,7 +1219,6 @@ class DefaultReplyer:
start_time = time.time()
from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool
logger.debug(f"获取知识库内容,元消息:{message[:30]}...,消息长度: {len(message)}")
# 从LPMM知识库获取知识
try:
@@ -1253,16 +1265,11 @@ class DefaultReplyer:
logger.error(f"获取知识库内容时发生异常: {str(e)}")
return ""
async def build_relation_info(self, reply_to: str = ""):
async def build_relation_info(self, sender: str, target: str):
if not global_config.relationship.enable_relationship:
return ""
relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
if not reply_to:
return ""
sender, text = self._parse_reply_target(reply_to)
if not sender or not text:
return ""
# 获取用户ID
person_info_manager = get_person_info_manager()

View File

@@ -106,8 +106,7 @@ class ModelTaskConfig(ValidatedConfigBase):
# 必需配置项
utils: TaskConfig = Field(..., description="组件模型配置")
utils_small: TaskConfig = Field(..., description="组件小模型配置")
replyer_1: TaskConfig = Field(..., description="normal_chat首要回复模型配置")
replyer_2: TaskConfig = Field(..., description="normal_chat次要回复模型配置")
replyer: TaskConfig = Field(..., description="normal_chat首要回复模型配置")
maizone: TaskConfig = Field(..., description="maizone专用模型")
emotion: TaskConfig = Field(..., description="情绪模型配置")
vlm: TaskConfig = Field(..., description="视觉语言模型配置")

View File

@@ -258,7 +258,6 @@ class MessageReceiveConfig(ValidatedConfigBase):
class NormalChatConfig(ValidatedConfigBase):
"""普通聊天配置类"""
willing_mode: str = Field(default="classical", description="意愿模式")
class ExpressionRule(ValidatedConfigBase):

View File

@@ -189,6 +189,7 @@ MoFox_Bot(第三方修改版)
get_emoji_manager().initialize()
logger.info("表情包管理器初始化成功")
# 启动情绪管理器
await mood_manager.start()
logger.info("情绪管理器初始化成功")

View File

@@ -13,11 +13,28 @@ logger = get_logger("s4u_stream_generator")
class S4UStreamGenerator:
def __init__(self):
# 使用LLMRequest替代AsyncOpenAIClient
self.llm_request = LLMRequest(
model_set=model_config.model_task_config.replyer,
request_type="s4u_replyer"
)
replyer_config = model_config.model_task_config.replyer
model_to_use = replyer_config.model_list[0]
model_info = model_config.get_model_info(model_to_use)
if not model_info:
logger.error(f"模型 {model_to_use} 在配置中未找到")
raise ValueError(f"模型 {model_to_use} 在配置中未找到")
provider_name = model_info.api_provider
provider_info = model_config.get_provider(provider_name)
if not provider_info:
logger.error("`replyer` 找不到对应的Provider")
raise ValueError("`replyer` 找不到对应的Provider")
api_key = provider_info.api_key
base_url = provider_info.base_url
if not api_key:
logger.error(f"{provider_name}没有配置API KEY")
raise ValueError(f"{provider_name}没有配置API KEY")
self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url)
self.model_1_name = model_to_use
self.replyer_config = replyer_config
self.current_model_name = "unknown model"
self.partial_response = ""
@@ -83,8 +100,18 @@ class S4UStreamGenerator:
f"{self.current_model_name}思考:{message_txt[:30] + '...' if len(message_txt) > 30 else message_txt}"
) # noqa: E501
# 使用LLMRequest进行流式生成
async for chunk in self._generate_response_with_llm_request(prompt):
current_client = self.client_1
self.current_model_name = self.model_1_name
extra_kwargs = {}
if self.replyer_config.get("enable_thinking") is not None:
extra_kwargs["enable_thinking"] = self.replyer_config.get("enable_thinking")
if self.replyer_config.get("thinking_budget") is not None:
extra_kwargs["thinking_budget"] = self.replyer_config.get("thinking_budget")
async for chunk in self._generate_response_with_model(
prompt, current_client, self.current_model_name, **extra_kwargs
):
yield chunk
async def _generate_response_with_llm_request(self, prompt: str) -> AsyncGenerator[str, None]:

View File

@@ -73,6 +73,7 @@ async def generate_reply(
chat_stream: Optional[ChatStream] = None,
chat_id: Optional[str] = None,
action_data: Optional[Dict[str, Any]] = None,
reply_to: str = "",
reply_message: Optional[Dict[str, Any]] = None,
extra_info: str = "",
reply_reason: str = "",
@@ -92,7 +93,8 @@ async def generate_reply(
chat_stream: 聊天流对象(优先)
chat_id: 聊天ID备用
action_data: 动作数据,向下兼容,包含reply_to和extra_info
reply_message: 回复的消息对象
reply_to: 回复对象,格式为 "发送者:消息内容"
reply_message: 回复的原始消息
extra_info: 额外信息,用于补充上下文
reply_reason: 回复原因
available_actions: 可用动作
@@ -133,6 +135,7 @@ async def generate_reply(
reply_reason=reply_reason,
from_plugin=from_plugin,
stream_id=chat_stream.stream_id if chat_stream else chat_id,
reply_message=reply_message,
)
if not success:
logger.warning("[GeneratorAPI] 回复生成失败")
@@ -188,7 +191,6 @@ async def rewrite_reply(
chat_id: 聊天ID备用
enable_splitter: 是否启用消息分割器
enable_chinese_typo: 是否启用错字生成器
model_set_with_weight: 模型配置列表,每个元素为 (TaskConfig, weight) 元组
raw_reply: 原始回复内容
reason: 回复原因
reply_to: 回复对象
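
For plugin code, the docstring above keeps reply_to only for backward compatibility; reply_message is the preferred path. A hedged usage sketch (the message fields are illustrative):

message = {
    "chat_info_platform": "qq",
    "user_id": "12345",
    "processed_plain_text": "你好",
}
success, reply_set, _ = await generator_api.generate_reply(
    chat_stream=chat_stream,
    reply_message=message,        # preferred: pass the raw message dict
    request_type="chat.replyer",
)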

View File

@@ -32,7 +32,7 @@ import traceback
import time
import difflib
import asyncio
from typing import Optional, Union, Dict
from typing import Optional, Union, Dict, Any
from src.common.logger import get_logger
# 导入依赖
@@ -87,8 +87,9 @@ async def _send_to_target(
stream_id: str,
display_message: str = "",
typing: bool = False,
reply_to: str = "",
set_reply: bool = False,
reply_message: Optional[Dict[str, Any]] = None,
reply_to_message: Optional[Dict[str, Any]] = None,
storage_message: bool = True,
show_log: bool = True,
selected_expressions: Optional[List[int]] = None,
@@ -109,9 +110,8 @@ async def _send_to_target(
bool: 是否发送成功
"""
try:
if set_reply and not reply_message:
logger.warning("[SendAPI] 使用引用回复,但未提供回复消息")
return False
if reply_to:
logger.warning("[SendAPI] 在0.10.0, reply_to 参数已弃用,请使用 reply_to_message 参数")
if show_log:
logger.debug(f"[SendAPI] 发送{message_type}消息到 {stream_id}")
@@ -139,16 +139,13 @@ async def _send_to_target(
# 创建消息段
message_segment = Seg(type=message_type, data=content) # type: ignore
if reply_message:
anchor_message = message_dict_to_message_recv(reply_message)
if anchor_message:
anchor_message.update_chat_stream(target_stream)
assert anchor_message.message_info.user_info, "用户信息缺失"
reply_to_platform_id = (
f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
)
if reply_to_message:
anchor_message = MessageRecv(message_dict=reply_to_message)
anchor_message.update_chat_stream(target_stream)
reply_to_platform_id = (
f"{anchor_message.message_info.platform}:{anchor_message.message_info.user_info.user_id}"
)
else:
reply_to_platform_id = ""
anchor_message = None
# 构建发送消息对象
@@ -249,8 +246,9 @@ async def text_to_stream(
text: str,
stream_id: str,
typing: bool = False,
reply_to: str = "",
reply_to_message: Optional[Dict[str, Any]] = None,
set_reply: bool = False,
reply_message: Optional[Dict[str, Any]] = None,
storage_message: bool = True,
selected_expressions: Optional[List[int]] = None,
) -> bool:
@@ -272,14 +270,15 @@ async def text_to_stream(
stream_id,
"",
typing,
reply_to,
set_reply=set_reply,
reply_message=reply_message,
reply_to_message=reply_to_message,
storage_message=storage_message,
selected_expressions=selected_expressions,
)
async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False,reply_message: Optional[Dict[str, Any]] = None) -> bool:
async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
"""向指定流发送表情包
Args:
@@ -290,10 +289,10 @@ async def emoji_to_stream(emoji_base64: str, stream_id: str, storage_message: bo
Returns:
bool: 是否发送成功
"""
return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply,reply_message=reply_message)
return await _send_to_target("emoji", emoji_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)
async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False,reply_message: Optional[Dict[str, Any]] = None) -> bool:
async def image_to_stream(image_base64: str, stream_id: str, storage_message: bool = True, set_reply: bool = False) -> bool:
"""向指定流发送图片
Args:
@@ -304,11 +303,11 @@ async def image_to_stream(image_base64: str, stream_id: str, storage_message: bo
Returns:
bool: 是否发送成功
"""
return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply,reply_message=reply_message)
return await _send_to_target("image", image_base64, stream_id, "", typing=False, storage_message=storage_message, set_reply=set_reply)
async def command_to_stream(
command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = "", set_reply: bool = False,reply_message: Optional[Dict[str, Any]] = None
command: Union[str, dict], stream_id: str, storage_message: bool = True, display_message: str = "", set_reply: bool = False
) -> bool:
"""向指定流发送命令
@@ -331,7 +330,8 @@ async def custom_to_stream(
stream_id: str,
display_message: str = "",
typing: bool = False,
reply_message: Optional[Dict[str, Any]] = None,
reply_to: str = "",
reply_to_message: Optional[Dict[str, Any]] = None,
set_reply: bool = False,
storage_message: bool = True,
show_log: bool = True,
@@ -356,7 +356,8 @@ async def custom_to_stream(
stream_id=stream_id,
display_message=display_message,
typing=typing,
reply_message=reply_message,
reply_to=reply_to,
reply_to_message=reply_to_message,
set_reply=set_reply,
storage_message=storage_message,
show_log=show_log,
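
send_api callers migrate the same way; a hedged before/after for text_to_stream (text and ids are illustrative):

# Deprecated since 0.10.0: string-based reply target
await send_api.text_to_stream(text="好的", stream_id=stream_id, reply_to="Alice:hello")

# Current: pass the original message dict and opt into quoting with set_reply
await send_api.text_to_stream(
    text="好的",
    stream_id=stream_id,
    reply_to_message=message_dict,
    set_reply=True,
)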

View File

@@ -48,7 +48,7 @@ class ContentService:
try:
# 获取模型配置
models = llm_api.get_available_models()
text_model = str(self.get_config("models.text_model", "replyer_1"))
text_model = str(self.get_config("models.text_model", "replyer"))
model_config = models.get(text_model)
if not model_config:
@@ -278,7 +278,7 @@ class ContentService:
try:
# 获取模型配置
models = llm_api.get_available_models()
text_model = str(self.get_config("models.text_model", "replyer_1"))
text_model = str(self.get_config("models.text_model", "replyer"))
model_config = models.get(text_model)
if not model_config:

View File

@@ -1,5 +1,5 @@
[inner]
version = "1.2.9"
version = "1.3.0"
# 配置文件版本号迭代规则同bot_config.toml