More events
@@ -86,6 +86,7 @@ async def generate_reply(
     return_prompt: bool = False,
     model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
     request_type: str = "generator_api",
+    from_plugin: bool = True,
 ) -> Tuple[bool, List[Tuple[str, Any]], Optional[str]]:
     """Generate a reply
 
@@ -102,12 +103,15 @@ async def generate_reply(
         return_prompt: Whether to return the prompt
         model_set_with_weight: List of model configs; each element is a (TaskConfig, weight) tuple
         request_type: Request type (optional, used to record LLM usage)
+        from_plugin: Whether the request comes from a plugin
     Returns:
         Tuple[bool, List[Tuple[str, Any]], Optional[str]]: (success, reply set, prompt)
     """
     try:
         # Get the replyer
-        replyer = get_replyer(chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type)
+        replyer = get_replyer(
+            chat_stream, chat_id, model_set_with_weight=model_set_with_weight, request_type=request_type
+        )
         if not replyer:
             logger.error("[GeneratorAPI] Failed to get a replyer")
             return False, [], None
@@ -120,20 +124,23 @@ async def generate_reply(
         extra_info = action_data.get("extra_info", "")
 
         # Call the replyer to generate a reply
-        success, content, prompt = await replyer.generate_reply_with_context(
+        success, llm_response_dict, prompt = await replyer.generate_reply_with_context(
             reply_to=reply_to,
             extra_info=extra_info,
             available_actions=available_actions,
             enable_tool=enable_tool,
+            from_plugin=from_plugin,
+            stream_id=chat_stream.stream_id if chat_stream else chat_id,
         )
-        reply_set = []
-        if content:
-            reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)
-
-        if success:
-            logger.debug(f"[GeneratorAPI] Reply generated successfully; produced {len(reply_set)} reply items")
-        else:
+        if not success:
             logger.warning("[GeneratorAPI] Reply generation failed")
+            return False, [], None
+        assert llm_response_dict is not None, "llm_response_dict should not be None"  # although llm_response should never be empty here
+        if content := llm_response_dict.get("content", ""):
+            reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)
+        else:
+            reply_set = []
+        logger.debug(f"[GeneratorAPI] Reply generated successfully; produced {len(reply_set)} reply items")
 
         if return_prompt:
             return success, reply_set, prompt
@@ -143,6 +150,10 @@ async def generate_reply(
     except ValueError as ve:
         raise ve
 
+    except UserWarning as uw:
+        logger.warning(f"[GeneratorAPI] Generation interrupted: {uw}")
+        return False, [], None
+
     except Exception as e:
         logger.error(f"[GeneratorAPI] Error while generating reply: {e}")
         logger.error(traceback.format_exc())
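
For orientation, a minimal sketch of calling the updated generate_reply. Everything before return_prompt is inferred from the function body above and should be treated as an assumption; only return_prompt, model_set_with_weight, request_type and from_plugin are visible in this diff.

# Hypothetical call site; the leading parameter names are assumptions.
success, reply_set, prompt = await generate_reply(
    chat_stream=chat_stream,
    action_data={"reply_to": "user:123", "extra_info": ""},
    return_prompt=True,
    from_plugin=False,  # new parameter in this commit; defaults to True
)
if success:
    for seg_type, seg_data in reply_set:  # List[Tuple[str, Any]] per the signature
        ...  # e.g. hand each segment to the sender
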
@@ -202,7 +213,7 @@ async def rewrite_reply(
         )
         reply_set = []
         if content:
-            reply_set = await process_human_text(content, enable_splitter, enable_chinese_typo)
+            reply_set = process_human_text(content, enable_splitter, enable_chinese_typo)
 
         if success:
             logger.info(f"[GeneratorAPI] Rewrote reply successfully; produced {len(reply_set)} reply items")
@@ -219,7 +230,7 @@
             return False, [], None
 
 
-async def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]:
+def process_human_text(content: str, enable_splitter: bool, enable_chinese_typo: bool) -> List[Tuple[str, Any]]:
     """Process text into a more human-like form
 
     Args:
@@ -243,6 +254,7 @@ async def process_human_text(content: str, enable_splitter: bool, enable_chinese
         logger.error(f"[GeneratorAPI] Error while processing human-like text: {e}")
         return []
 
+
 async def generate_response_custom(
     chat_stream: Optional[ChatStream] = None,
     chat_id: Optional[str] = None,
@@ -265,4 +277,4 @@ async def generate_response_custom(
         return None
     except Exception as e:
         logger.error(f"[GeneratorAPI] Error while generating custom response: {e}")
-        return None
+    return None

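Note that process_human_text is now synchronous, so both call sites above drop the await. A minimal usage sketch:

# No await needed after this commit; arguments per the new signature.
reply_set = process_human_text("你好,世界!", True, True)  # (content, enable_splitter, enable_chinese_typo)
print(len(reply_set))  # number of (type, data) reply items
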
@@ -4,6 +4,7 @@ from dataclasses import dataclass, field
 from maim_message import Seg
 
 from src.llm_models.payload_content.tool_option import ToolParamType as ToolParamType
+from src.llm_models.payload_content.tool_option import ToolCall as ToolCall
 
 # Component type enum
 class ComponentType(Enum):
@@ -259,8 +260,20 @@ class MaiMessages:
     llm_prompt: Optional[str] = None
     """LLM prompt"""
 
-    llm_response: Optional[str] = None
+    llm_response_content: Optional[str] = None
     """LLM response content"""
 
+    llm_response_reasoning: Optional[str] = None
+    """LLM response reasoning content"""
+
+    llm_response_model: Optional[str] = None
+    """LLM response model name"""
+
+    llm_response_tool_call: Optional[List[ToolCall]] = None
+    """Tool calls made by the LLM"""
+
+    action_usage: Optional[List[str]] = None
+    """Actions used"""
+
     additional_data: Dict[Any, Any] = field(default_factory=dict)
     """Additional data for storing extra information"""

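A sketch of populating the new MaiMessages fields; the values are illustrative, and every field shown defaults to None per the declarations above:

# Illustrative values only; all fields shown are optional.
msg = MaiMessages(
    llm_prompt="...",
    llm_response_content="hello",
    llm_response_reasoning=None,
    llm_response_model="example-model",
    llm_response_tool_call=None,
    action_usage=["reply"],
)
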
@@ -1,6 +1,6 @@
 import asyncio
 import contextlib
-from typing import List, Dict, Optional, Type, Tuple
+from typing import List, Dict, Optional, Type, Tuple, Any
 
 from src.chat.message_receive.message import MessageRecv
 from src.chat.message_receive.chat_stream import get_chat_manager
@@ -47,8 +47,9 @@ class EventsManager:
         event_type: EventType,
         message: Optional[MessageRecv] = None,
         llm_prompt: Optional[str] = None,
-        llm_response: Optional[str] = None,
+        llm_response: Optional[Dict[str, Any]] = None,
         stream_id: Optional[str] = None,
+        action_usage: Optional[List[str]] = None,
     ) -> bool:
         """Handle events"""
         from src.plugin_system.core import component_registry
@@ -57,7 +58,12 @@
         transformed_message: Optional[MaiMessages] = None
         if not message:
             assert stream_id, "A stream ID must be provided when there is no message"
-            transformed_message = self._build_message_from_stream(stream_id, llm_prompt, llm_response)
+            if event_type in [EventType.ON_MESSAGE, EventType.ON_PLAN, EventType.POST_LLM, EventType.AFTER_LLM]:
+                transformed_message = self._build_message_from_stream(stream_id, llm_prompt, llm_response)
+            else:
+                transformed_message = self._transform_event_without_message(
+                    stream_id, llm_prompt, llm_response, action_usage
+                )
         else:
             transformed_message = self._transform_event_message(message, llm_prompt, llm_response)
         for handler in self._events_subscribers.get(event_type, []):
@@ -121,13 +127,16 @@
             return False
 
     def _transform_event_message(
-        self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional[str] = None
+        self, message: MessageRecv, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
     ) -> MaiMessages:
         """Transform the event message format"""
         # Assign the directly mappable fields
         transformed_message = MaiMessages(
             llm_prompt=llm_prompt,
-            llm_response=llm_response,
+            llm_response_content=llm_response.get("content") if llm_response else None,
+            llm_response_reasoning=llm_response.get("reasoning") if llm_response else None,
+            llm_response_model=llm_response.get("model") if llm_response else None,
+            llm_response_tool_call=llm_response.get("tool_calls") if llm_response else None,
             raw_message=message.raw_message,
             additional_data=message.message_info.additional_config or {},
         )
@@ -171,7 +180,7 @@
         return transformed_message
 
     def _build_message_from_stream(
-        self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional[str] = None
+        self, stream_id: str, llm_prompt: Optional[str] = None, llm_response: Optional[Dict[str, Any]] = None
     ) -> MaiMessages:
         """Build a message from a stream ID"""
         chat_stream = get_chat_manager().get_stream(stream_id)
@@ -179,6 +188,29 @@
             message = chat_stream.context.get_last_message()
         return self._transform_event_message(message, llm_prompt, llm_response)
 
+    def _transform_event_without_message(
+        self,
+        stream_id: str,
+        llm_prompt: Optional[str] = None,
+        llm_response: Optional[Dict[str, Any]] = None,
+        action_usage: Optional[List[str]] = None,
+    ) -> MaiMessages:
+        """Transform when there is no message object"""
+        chat_stream = get_chat_manager().get_stream(stream_id)
+        assert chat_stream, f"No chat stream found for stream ID {stream_id}"
+        return MaiMessages(
+            stream_id=stream_id,
+            llm_prompt=llm_prompt,
+            llm_response_content=(llm_response.get("content") if llm_response else None),
+            llm_response_reasoning=(llm_response.get("reasoning") if llm_response else None),
+            llm_response_model=llm_response.get("model") if llm_response else None,
+            llm_response_tool_call=(llm_response.get("tool_calls") if llm_response else None),
+            is_group_message=bool(chat_stream.group_info),
+            is_private_message=(not chat_stream.group_info),
+            action_usage=action_usage,
+            additional_data={"response_is_processed": True},
+        )
+
     def _task_done_callback(self, task: asyncio.Task[Tuple[bool, bool, str | None]]):
         """Task completion callback"""
         task_name = task.get_name() or "Unknown Task"
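
Putting it together: llm_response is now a dict whose content/reasoning/model/tool_calls keys feed the corresponding MaiMessages fields. A hedged sketch of dispatching an event with the new payload; the enclosing method's name is not visible in this diff, so handle_events and the events_manager instance are placeholders:

# Placeholder names; only the parameters match the signature shown above,
# and awaitability of the dispatch method is an assumption.
await events_manager.handle_events(
    event_type=EventType.AFTER_LLM,
    stream_id="some-stream-id",
    llm_prompt="...",
    llm_response={
        "content": "hello",        # -> llm_response_content
        "reasoning": None,         # -> llm_response_reasoning
        "model": "example-model",  # -> llm_response_model
        "tool_calls": None,        # -> llm_response_tool_call
    },
    action_usage=["reply"],
)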