More events: wire ON_PLAN / POST_LLM / AFTER_LLM plugin events through the chat loop and replyer, and pass structured LLM responses to event handlers
@@ -389,7 +389,10 @@ class HeartFChatting:
                 chat_target_info=planner_info[1],
                 current_available_actions=planner_info[2],
             )
-            await events_manager.handle_mai_events(EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id)
+            if not await events_manager.handle_mai_events(
+                EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id
+            ):
+                return False
             with Timer("规划器", cycle_timers):
                 plan_result, target_message = await self.action_planner.plan(mode=self.loop_mode)

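The ON_PLAN dispatch above changes from fire-and-forget to a gate: if any subscriber cancels the event, the planning cycle bails out with `return False`. A minimal sketch of that veto semantics, assuming a hypothetical async-handler shape (the project's real handler base class is not part of this diff):

```python
# Veto-style dispatch: run subscribers in order, stop at the first cancel.
from typing import Awaitable, Callable, List

Handler = Callable[[dict], Awaitable[bool]]  # returns False to cancel

async def dispatch(handlers: List[Handler], event: dict) -> bool:
    for handler in handlers:
        if not await handler(event):
            return False  # one veto aborts the whole pipeline
    return True
```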
@@ -761,6 +764,7 @@ class HeartFChatting:
                 available_actions=available_actions,
                 enable_tool=global_config.tool.enable_tool,
                 request_type=request_type,
+                from_plugin=False,
             )

             if not success or not reply_set:
@@ -29,9 +29,10 @@ from src.chat.memory_system.instant_memory import InstantMemory
 from src.mood.mood_manager import mood_manager
 from src.person_info.relationship_fetcher import relationship_fetcher_manager
 from src.person_info.person_info import get_person_info_manager
-from src.plugin_system.base.component_types import ActionInfo
+from src.plugin_system.base.component_types import ActionInfo, EventType
+from src.plugin_system.apis import llm_api


 logger = get_logger("replyer")

@@ -179,7 +180,10 @@ class DefaultReplyer:
         extra_info: str = "",
         available_actions: Optional[Dict[str, ActionInfo]] = None,
         enable_tool: bool = True,
-    ) -> Tuple[bool, Optional[str], Optional[str]]:
+        from_plugin: bool = True,
+        stream_id: Optional[str] = None,
+    ) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str]]:
+        # sourcery skip: merge-nested-ifs
         """
         回复器 (Replier): 负责生成回复文本的核心逻辑。
@@ -188,9 +192,10 @@ class DefaultReplyer:
             extra_info: 额外信息,用于补充上下文
             available_actions: 可用的动作信息字典
             enable_tool: 是否启用工具调用
+            from_plugin: 是否来自插件

         Returns:
-            Tuple[bool, Optional[str], Optional[str]]: (是否成功, 生成的回复内容, 使用的prompt)
+            Tuple[bool, Optional[Dict[str, Any]], Optional[str]]: (是否成功, 生成的回复, 使用的prompt)
         """
         prompt = None
         if available_actions is None:
@@ -208,6 +213,13 @@ class DefaultReplyer:
         if not prompt:
             logger.warning("构建prompt失败,跳过回复生成")
             return False, None, None
+        from src.plugin_system.core.events_manager import events_manager
+
+        if not from_plugin:
+            if not await events_manager.handle_mai_events(
+                EventType.POST_LLM, None, prompt, None, stream_id=stream_id
+            ):
+                raise UserWarning("插件于请求前中断了内容生成")

         # 4. 调用 LLM 生成回复
         content = None
@@ -215,16 +227,29 @@ class DefaultReplyer:
             model_name = "unknown_model"

             try:
-                content, reasoning_content, model_name, _ = await self.llm_generate_content(prompt)
+                content, reasoning_content, model_name, tool_call = await self.llm_generate_content(prompt)
                 logger.debug(f"replyer生成内容: {content}")
+
+                llm_response = {
+                    "content": content,
+                    "reasoning": reasoning_content,
+                    "model": model_name,
+                    "tool_calls": tool_call,
+                }
+                if not from_plugin and not await events_manager.handle_mai_events(
+                    EventType.AFTER_LLM, None, prompt, llm_response, stream_id=stream_id
+                ):
+                    raise UserWarning("插件于请求后取消了内容生成")
+            except UserWarning as e:
+                raise e
             except Exception as llm_e:
                 # 精简报错信息
                 logger.error(f"LLM 生成失败: {llm_e}")
                 return False, None, prompt  # LLM 调用失败则无法生成回复

-            return True, content, prompt
+            return True, llm_response, prompt

+        except UserWarning as uw:
+            raise uw
         except Exception as e:
             logger.error(f"回复生成意外失败: {e}")
             traceback.print_exc()
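Instead of returning the bare content string, the replyer now bundles the raw LLM output into a dict before firing AFTER_LLM and returning. A small consumer sketch, assuming only the four keys assembled above (`content`, `reasoning`, `model`, `tool_calls`); the helper name is illustrative:

```python
from typing import Any, Dict, List, Optional

def summarize_llm_response(llm_response: Dict[str, Any]) -> str:
    """Render the structured LLM response for logging."""
    content: Optional[str] = llm_response.get("content")
    tool_calls: Optional[List[Any]] = llm_response.get("tool_calls")
    model = llm_response.get("model", "unknown_model")
    n_tools = len(tool_calls) if tool_calls else 0
    return f"[{model}] {len(content or '')} chars, {n_tools} tool call(s)"
```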
@@ -1022,6 +1047,7 @@ class DefaultReplyer:
         related_info = ""
         start_time = time.time()
+        from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool

         if not reply_to:
             logger.debug("没有回复对象,跳过获取知识库内容")
             return ""
@@ -53,9 +53,6 @@ class LLMRequest:
         }
         """模型使用量记录,用于进行负载均衡,对应为(total_tokens, penalty, usage_penalty),惩罚值是为了能在某个模型请求不给力或正在被使用的时候进行调整"""

-        self.pri_in = 0
-        self.pri_out = 0
-
     async def generate_response_for_image(
         self,
         prompt: str,
@@ -86,6 +86,7 @@ async def generate_reply(
     return_prompt: bool = False,
     model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "generator_api",
+    from_plugin: bool = True,
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
     """生成回复

@@ -102,12 +103,15 @@ async def generate_reply(
         return_prompt: 是否返回提示词
         model_set_with_weight: 模型配置列表,每个元素为 (TaskConfig, weight) 元组
         request_type: 请求类型(可选,记录LLM使用)
+        from_plugin: 是否来自插件
     Returns:
         Tuple[bool, List[Tuple[str, Any]], Optional[str]]: (是否成功, 回复集合, 提示词)
     """
     try:
         # 获取回复器
-        replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type)
+        replyer = get_replyer(
+            chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type
+        )
         if not replyer:
             logger.error("[GeneratorAPI] 无法获取回复器")
             return False, [], None
@@ -120,20 +124,23 @@ async def generate_reply(
         extra_info = action_data.get("extra_info", "")

         # 调用回复器生成回复
-        success, content, prompt = await replyer.generate_reply_with_context(
+        success, llm_response_dict, prompt = await replyer.generate_reply_with_context(
             reply_to=reply_to,
             extra_info=extra_info,
             available_actions=available_actions,
             enable_tool=enable_tool,
+            from_plugin=from_plugin,
+            stream_id=chat_stream.stream_id if chat_stream else chat_id,
         )
-        reply_set = []
-        if content:
-            reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)
-        if success:
-            logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")
-        else:
+        if not success:
             logger.warning("[GeneratorAPI] 回复生成失败")
             return False, [], None
+        assert llm_response_dict is not None, "llm_response_dict不应为None"  # 虽然说不会出现llm_response为空的情况
+        if content := llm_response_dict.get("content", ""):
+            reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)
+        else:
+            reply_set = []
+        logger.debug(f"[GeneratorAPI] 回复生成成功,生成了 {len(reply_set)} 个回复项")

         if return_prompt:
             return success, reply_set, prompt
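Callers of `generate_reply` keep the old `(success, reply_set, prompt)` contract; the dict unpacking stays internal. An illustrative invocation (argument values are placeholders, and the `action_data` keys follow the lookups above):

```python
# from_plugin defaults to True, so plugin-driven generations skip the
# POST_LLM/AFTER_LLM round-trip; the core chat loop passes from_plugin=False
# to give event handlers a chance to inspect or cancel the generation.
success, reply_set, prompt = await generate_reply(
    chat_stream=chat_stream,                           # or chat_id=...
    action_data={"reply_to": "...", "extra_info": ""},
    return_prompt=True,
    from_plugin=True,
)
```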
@@ -143,6 +150,10 @@ async def generate_reply(
     except ValueError as ve:
         raise ve

+    except UserWarning as uw:
+        logger.warning(f"[GeneratorAPI] 中断了生成: {uw}")
+        return False, [], None
+
     except Exception as e:
         logger.error(f"[GeneratorAPI] 生成回复时出错: {e}")
         logger.error(traceback.format_exc())
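Cancellation travels as a `UserWarning`: raised deep inside the replyer, deliberately re-raised through the nested try blocks, and only absorbed here into a clean `(False, [], None)`. A condensed stand-in for that control flow (function names are illustrative):

```python
async def inner() -> str:
    # deep in the replyer: a plugin vetoed the generation
    raise UserWarning("plugin cancelled generation")

async def outer():
    try:
        return True, [await inner()], None
    except UserWarning as uw:   # mirrors the new except branch above
        print(f"interrupted: {uw}")
        return False, [], None
```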
@@ -202,7 +213,7 @@ async def rewrite_reply(
         )
         reply_set = []
         if content:
-            reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)
+            reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)

         if success:
             logger.info(f"[GeneratorAPI] 重写回复成功,生成了 {len(reply_set)} 个回复项")
@@ -219,7 +230,7 @@ async def rewrite_reply(
         return False, [], None


-async def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]:
+def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]:
    """将文本处理为更拟人化的文本

     Args:
@@ -243,6 +254,7 @@ async def process_human_text(content: str, enable_splitter: bool, enable_chinese
         logger.error(f"[GeneratorAPI] 处理人形文本时出错: {e}")
         return []

+
 async def generate_response_custom(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
@@ -4,6 +4,7 @@ from dataclasses import dataclass, field
 from maim_message import Seg

 from src.llm_models.payload_content.tool_option import ToolParamType as ToolParamType
+from src.llm_models.payload_content.tool_option import ToolCall as ToolCall

 # 组件类型枚举
 class ComponentType(Enum):
@@ -259,9 +260,21 @@ class MaiMessages:
     llm_prompt: Optional[str] = None
     """LLM提示词"""

-    llm_response: Optional[str] = None
+    llm_response_content: Optional[str] = None
     """LLM响应内容"""

+    llm_response_reasoning: Optional[str] = None
+    """LLM响应推理内容"""
+
+    llm_response_model: Optional[str] = None
+    """LLM响应模型名称"""
+
+    llm_response_tool_call: Optional[List[ToolCall]] = None
+    """LLM使用的工具调用"""
+
+    action_usage: Optional[List[str]] = None
+    """使用的Action"""
+
     additional_data: Dict[Any, Any] = field(default_factory=dict)
     """附加数据,可以存储额外信息"""

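Splitting the single `llm_response` string into dedicated fields gives event handlers typed access to each part of the response. A minimal construction of the widened dataclass (field names from the diff; values are placeholders, and all other fields keep their defaults):

```python
msg = MaiMessages(
    llm_prompt="<prompt text>",
    llm_response_content="<generated reply>",
    llm_response_reasoning=None,        # only set by reasoning models
    llm_response_model="unknown_model",
    llm_response_tool_call=None,        # List[ToolCall] when tools fired
    action_usage=["reply"],             # placeholder action name
)
```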
@@ -1,6 +1,6 @@
 import asyncio
 import contextlib
-from typing import List, Dict, Optional, Type, Tuple
+from typing import List, Dict, Optional, Type, Tuple, Any

 from src.chat.message_receive.message import MessageRecv
 from src.chat.message_receive.chat_stream import get_chat_manager
@@ -47,8 +47,9 @@ class EventsManager:
         event_type: EventType,
         message: Optional[MessageRecv] = None,
         llm_prompt: Optional[str] = None,
-        llm_response: Optional[str] = None,
+        llm_response: Optional[Dict[str, Any]] = None,
         stream_id: Optional[str] = None,
+        action_usage: Optional[List[str]] = None,
     ) -> bool:
         """处理 events"""
         from src.plugin_system.core import component_registry
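The signature change ripples to every call site: `llm_response` is now the structured dict assembled by the replyer rather than a plain string, and `action_usage` lets message-less events report which actions ran. The two call shapes introduced by this commit:

```python
# ON_PLAN, from the chat loop: prompt only, no response yet.
await events_manager.handle_mai_events(
    EventType.ON_PLAN, None, prompt_info[0], None, self.chat_stream.stream_id
)

# AFTER_LLM, from the replyer: the structured response dict rides along.
await events_manager.handle_mai_events(
    EventType.AFTER_LLM, None, prompt, llm_response, stream_id=stream_id
)
```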
@@ -57,7 +58,12 @@ class EventsManager:
         transformed_message: Optional[MaiMessages] = None
         if not message:
             assert stream_id, "如果没有消息,必须提供流ID"
-            transformed_message = self._build_message_from_stream(stream_id, llm_prompt, llm_response)
+            if event_type in [EventType.ON_MESSAGE, EventType.ON_PLAN, EventType.POST_LLM, EventType.AFTER_LLM]:
+                transformed_message = self._build_message_from_stream(stream_id, llm_prompt, llm_response)
+            else:
+                transformed_message = self._transform_event_without_message(
+                    stream_id, llm_prompt, llm_response, action_usage
+                )
         else:
             transformed_message = self._transform_event_message(message, llm_prompt, llm_response)
         for handler in self._events_subscribers.get(event_type, []):
@@ -121,13 +127,16 @@ class EventsManager:
             return False

     def _transform_event_message(
-        self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional[str] = None
+        self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
     ) -> MaiMessages:
         """转换事件消息格式"""
         # 直接赋值部分内容
         transformed_message = MaiMessages(
             llm_prompt=llm_prompt,
-            llm_response=llm_response,
+            llm_response_content=llm_response.get("content") if llm_response else None,
+            llm_response_reasoning=llm_response.get("reasoning") if llm_response else None,
+            llm_response_model=llm_response.get("model") if llm_response else None,
+            llm_response_tool_call=llm_response.get("tool_calls") if llm_response else None,
             raw_message=message.raw_message,
             additional_data=message.message_info.additional_config or {},
         )
@@ -171,7 +180,7 @@ class EventsManager:
         return transformed_message

     def _build_message_from_stream(
-        self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional[str] = None
+        self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
     ) -> MaiMessages:
         """从流ID构建消息"""
         chat_stream = get_chat_manager().get_stream(stream_id)
@@ -179,6 +188,29 @@ class EventsManager:
         message = chat_stream.context.get_last_message()
         return self._transform_event_message(message, llm_prompt, llm_response)

+    def _transform_event_without_message(
+        self,
+        stream_id: str,
+        llm_prompt: Optional[str] = None,
+        llm_response: Optional[Dict[str, Any]] = None,
+        action_usage: Optional[List[str]] = None,
+    ) -> MaiMessages:
+        """没有message对象时进行转换"""
+        chat_stream = get_chat_manager().get_stream(stream_id)
+        assert chat_stream, f"未找到流ID为 {stream_id} 的聊天流"
+        return MaiMessages(
+            stream_id=stream_id,
+            llm_prompt=llm_prompt,
+            llm_response_content=(llm_response.get("content") if llm_response else None),
+            llm_response_reasoning=(llm_response.get("reasoning") if llm_response else None),
+            llm_response_model=llm_response.get("model") if llm_response else None,
+            llm_response_tool_call=(llm_response.get("tool_calls") if llm_response else None),
+            is_group_message=(not (not chat_stream.group_info)),
+            is_private_message=(not chat_stream.group_info),
+            action_usage=action_usage,
+            additional_data={"response_is_processed": True},
+        )
+
     def _task_done_callback(self, task: asyncio.Task[Tuple[bool, bool, str | None]]):
         """任务完成回调"""
         task_name = task.get_name() or "Unknown Task"
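End to end, a subscriber can now tell content, reasoning, and tool calls apart. A hypothetical handler body built only on the `MaiMessages` fields above (the subscription mechanism itself is outside this diff):

```python
async def on_after_llm(msg: MaiMessages) -> bool:
    """Hypothetical AFTER_LLM subscriber: veto empty replies, log the rest."""
    if not msg.llm_response_content:
        return False  # cancel; the replyer raises UserWarning upstream
    if msg.llm_response_tool_call:
        print(f"{msg.llm_response_model} made "
              f"{len(msg.llm_response_tool_call)} tool call(s)")
    return True
```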