A better timer utility, thanks to D's guidance
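Every hunk in this commit applies the same substitution: the manual `timer1 = time.time()` / `timer2 = time.time()` / `timing_results[...] = timer2 - timer1` bookkeeping is replaced by the `Timer` context manager added in `src/plugins/utils/timer_calculater.py`. A minimal, self-contained sketch of the before/after pattern follows (assuming the project root is on sys.path; `do_work`, `old_style`, and `new_style` are placeholder names standing in for the real awaited calls such as `generate_response`):

import asyncio
import time

from src.plugins.utils.timer_calculater import Timer  # module added by this commit

timing_results = {}

async def do_work():
    await asyncio.sleep(0.1)  # placeholder for e.g. self.gpt.generate_response(...)

async def old_style():
    # removed pattern: manual start/stop timestamps
    timer1 = time.time()
    await do_work()
    timer2 = time.time()
    timing_results["生成回复"] = timer2 - timer1

async def new_style():
    # added pattern: Timer writes the elapsed seconds into timing_results on __exit__
    with Timer("生成回复", timing_results):
        await do_work()

asyncio.run(old_style())
asyncio.run(new_style())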
@@ -19,6 +19,7 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer

# 定义日志配置
chat_config = LogConfig(
@@ -173,12 +174,10 @@ class ReasoningChat:
await self.storage.store_message(message, chat)

# 记忆激活
timer1 = time.time()
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)
timer2 = time.time()
timing_results["记忆激活"] = timer2 - timer1
with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)

# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
buffer_result = await message_buffer.query_buffer_result(message)
@@ -228,10 +227,8 @@ class ReasoningChat:
await willing_manager.before_generate_reply_handle(message.message_info.message_id)

# 创建思考消息
timer1 = time.time()
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
timer2 = time.time()
timing_results["创建思考消息"] = timer2 - timer1
with Timer("创建思考消息", timing_results):
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)

logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")
@@ -239,11 +236,9 @@ class ReasoningChat:
info_catcher.catch_decide_to_response(message)

# 生成回复
timer1 = time.time()
try:
response_set = await self.gpt.generate_response(message, thinking_id)
timer2 = time.time()
timing_results["生成回复"] = timer2 - timer1
with Timer("生成回复", timing_results):
response_set = await self.gpt.generate_response(message, thinking_id)

info_catcher.catch_after_generate_response(timing_results["生成回复"])
except Exception as e:
@@ -255,26 +250,20 @@ class ReasoningChat:
return

# 发送消息
timer1 = time.time()
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
timer2 = time.time()
timing_results["发送消息"] = timer2 - timer1
with Timer("发送消息", timing_results):
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)

info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)

info_catcher.done_catch()

# 处理表情包
timer1 = time.time()
await self._handle_emoji(message, chat, response_set)
timer2 = time.time()
timing_results["处理表情包"] = timer2 - timer1
with Timer("处理表情包", timing_results):
await self._handle_emoji(message, chat, response_set)

# 更新关系情绪
timer1 = time.time()
await self._update_relationship(message, response_set)
timer2 = time.time()
timing_results["更新关系情绪"] = timer2 - timer1
with Timer("更新关系情绪", timing_results):
await self._update_relationship(message, response_set)

# 回复后处理
await willing_manager.after_generate_reply_handle(message.message_info.message_id)
@@ -7,6 +7,7 @@ from ...config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response
from ...utils.timer_calculater import Timer
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
@@ -38,7 +39,7 @@ class ResponseGenerator:
self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model"

async def generate_response(self, message: MessageThinking,thinking_id:str) -> Optional[Union[str, List[str]]]:
async def generate_response(self, message: MessageThinking, thinking_id: str) -> Optional[Union[str, List[str]]]:
"""根据当前模型类型选择对应的生成函数"""
# 从global_config中获取模型概率值并选择模型
if random.random() < global_config.MODEL_R1_PROBABILITY:
@@ -52,7 +53,7 @@ class ResponseGenerator:
f"{self.current_model_type}思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
) # noqa: E501

model_response = await self._generate_response_with_model(message, current_model,thinking_id)
model_response = await self._generate_response_with_model(message, current_model, thinking_id)

# print(f"raw_content: {model_response}")
@@ -65,11 +66,11 @@ class ResponseGenerator:
logger.info(f"{self.current_model_type}思考,失败")
return None

async def _generate_response_with_model(self, message: MessageThinking, model: LLM_request,thinking_id:str):
async def _generate_response_with_model(self, message: MessageThinking, model: LLM_request, thinking_id: str):
sender_name = ""

info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
sender_name = (
f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
@@ -82,26 +83,22 @@ class ResponseGenerator:

logger.debug("开始使用生成回复-2")
# 构建prompt
timer1 = time.time()
prompt = await prompt_builder._build_prompt(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
timer2 = time.time()
logger.info(f"构建prompt时间: {timer2 - timer1}秒")
with Timer() as t_build_prompt:
prompt = await prompt_builder._build_prompt(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
logger.info(f"构建prompt时间: {t_build_prompt.human_readable()}")

try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)

info_catcher.catch_after_llm_generated(
prompt=prompt,
response=content,
reasoning_content=reasoning_content,
model_name=self.current_model_name)

prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
)

except Exception:
logger.exception("生成回复时出错")
return None
@@ -118,7 +115,6 @@ class ResponseGenerator:

return content

# def _save_to_db(
# self,
# message: MessageRecv,
@@ -20,6 +20,7 @@ from ...chat.chat_stream import chat_manager
from ...person_info.relationship_manager import relationship_manager
from ...chat.message_buffer import message_buffer
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer

# 定义日志配置
chat_config = LogConfig(
@@ -59,11 +60,7 @@ class ThinkFlowChat:

return thinking_id

async def _send_response_messages(self,
message,
chat,
response_set:List[str],
thinking_id) -> MessageSending:
async def _send_response_messages(self, message, chat, response_set: List[str], thinking_id) -> MessageSending:
"""发送回复消息"""
container = message_manager.get_container(chat.stream_id)
thinking_message = None
@@ -200,12 +197,10 @@ class ThinkFlowChat:
logger.debug(f"存储成功{message.processed_plain_text}")

# 记忆激活
timer1 = time.time()
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)
timer2 = time.time()
timing_results["记忆激活"] = timer2 - timer1
with Timer("记忆激活", timing_results):
interested_rate = await HippocampusManager.get_instance().get_activate_from_text(
message.processed_plain_text, fast_retrieval=True
)
logger.debug(f"记忆激活: {interested_rate}")

# 查询缓冲器结果,会整合前面跳过的消息,改变processed_plain_text
@@ -260,103 +255,85 @@ class ThinkFlowChat:
if random() < reply_probability:
try:
do_reply = True

# 回复前处理
await willing_manager.before_generate_reply_handle(message.message_info.message_id)

# 创建思考消息
try:
timer1 = time.time()
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
timer2 = time.time()
timing_results["创建思考消息"] = timer2 - timer1
with Timer("创建思考消息", timing_results):
thinking_id = await self._create_thinking_message(message, chat, userinfo, messageinfo)
except Exception as e:
logger.error(f"心流创建思考消息失败: {e}")

logger.debug(f"创建捕捉器,thinking_id:{thinking_id}")

info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
info_catcher.catch_decide_to_response(message)

try:
# 观察
timer1 = time.time()
await heartflow.get_subheartflow(chat.stream_id).do_observe()
timer2 = time.time()
timing_results["观察"] = timer2 - timer1
with Timer("观察", timing_results):
await heartflow.get_subheartflow(chat.stream_id).do_observe()
except Exception as e:
logger.error(f"心流观察失败: {e}")

info_catcher.catch_after_observe(timing_results["观察"])
# 思考前脑内状态
try:
timer1 = time.time()
current_mind,past_mind = await heartflow.get_subheartflow(chat.stream_id).do_thinking_before_reply(
message_txt = message.processed_plain_text,
sender_name = message.message_info.user_info.user_nickname,
chat_stream = chat
)
timer2 = time.time()
timing_results["思考前脑内状态"] = timer2 - timer1
with Timer("思考前脑内状态", timing_results):
current_mind, past_mind = await heartflow.get_subheartflow(
chat.stream_id
).do_thinking_before_reply(
message_txt=message.processed_plain_text,
sender_name=message.message_info.user_info.user_nickname,
chat_stream=chat,
)
except Exception as e:
logger.error(f"心流思考前脑内状态失败: {e}")

info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"],past_mind,current_mind)

info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind)

# 生成回复
timer1 = time.time()
response_set = await self.gpt.generate_response(message,thinking_id)
timer2 = time.time()
timing_results["生成回复"] = timer2 - timer1
with Timer("生成回复", timing_results):
response_set = await self.gpt.generate_response(message, thinking_id)

info_catcher.catch_after_generate_response(timing_results["生成回复"])

if not response_set:
logger.info("回复生成失败,返回为空")
return
# 发送消息
try:
timer1 = time.time()
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
timer2 = time.time()
timing_results["发送消息"] = timer2 - timer1
with Timer("发送消息", timing_results):
first_bot_msg = await self._send_response_messages(message, chat, response_set, thinking_id)
except Exception as e:
logger.error(f"心流发送消息失败: {e}")

info_catcher.catch_after_response(timing_results["发送消息"],response_set,first_bot_msg)

info_catcher.catch_after_response(timing_results["发送消息"], response_set, first_bot_msg)

info_catcher.done_catch()

# 处理表情包
try:
timer1 = time.time()
await self._handle_emoji(message, chat, response_set)
timer2 = time.time()
timing_results["处理表情包"] = timer2 - timer1
with Timer("处理表情包", timing_results):
await self._handle_emoji(message, chat, response_set)
except Exception as e:
logger.error(f"心流处理表情包失败: {e}")

# 更新心流
try:
timer1 = time.time()
await self._update_using_response(message, response_set)
timer2 = time.time()
timing_results["更新心流"] = timer2 - timer1
with Timer("更新心流", timing_results):
await self._update_using_response(message, response_set)
except Exception as e:
logger.error(f"心流更新失败: {e}")

# 更新关系情绪
try:
timer1 = time.time()
await self._update_relationship(message, response_set)
timer2 = time.time()
timing_results["更新关系情绪"] = timer2 - timer1
with Timer("更新关系情绪", timing_results):
await self._update_relationship(message, response_set)
except Exception as e:
logger.error(f"心流更新关系情绪失败: {e}")
@@ -1,4 +1,3 @@
import time
from typing import List, Optional
import random

@@ -10,6 +9,7 @@ from .think_flow_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response
from src.common.logger import get_module_logger, LogConfig, LLM_STYLE_CONFIG
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from ...utils.timer_calculater import Timer

from src.plugins.moods.moods import MoodManager
@@ -35,44 +35,50 @@ class ResponseGenerator:
self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model"

async def generate_response(self, message: MessageRecv,thinking_id:str) -> Optional[List[str]]:
async def generate_response(self, message: MessageRecv, thinking_id: str) -> Optional[List[str]]:
"""根据当前模型类型选择对应的生成函数"""

logger.info(
f"思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
)

arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()

time1 = time.time()

checked = False
if random.random() > 0:
checked = False
current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier #激活度越高,温度越高
model_response = await self._generate_response_with_model(message, current_model,thinking_id,mode="normal")

model_checked_response = model_response
else:
checked = True
current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier #激活度越高,温度越高
print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
model_response = await self._generate_response_with_model(message, current_model,thinking_id,mode="simple")

current_model.temperature = 0.3
model_checked_response = await self._check_response_with_model(message, model_response, current_model,thinking_id)
time2 = time.time()
arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()

with Timer() as t_generate_response:
checked = False
if random.random() > 0:
checked = False
current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="normal"
)

model_checked_response = model_response
else:
checked = True
current_model = self.model_normal
current_model.temperature = 0.3 * arousal_multiplier # 激活度越高,温度越高
print(f"生成{message.processed_plain_text}回复温度是:{current_model.temperature}")
model_response = await self._generate_response_with_model(
message, current_model, thinking_id, mode="simple"
)

current_model.temperature = 0.3
model_checked_response = await self._check_response_with_model(
message, model_response, current_model, thinking_id
)

if model_response:
if checked:
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {time2 - time1}秒")
logger.info(
f"{global_config.BOT_NICKNAME}的回复是:{model_response},思忖后,回复是:{model_checked_response},生成回复时间: {t_generate_response.human_readable()}"
)
else:
logger.info(f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {time2 - time1}秒")

logger.info(
f"{global_config.BOT_NICKNAME}的回复是:{model_response},生成回复时间: {t_generate_response.human_readable()}"
)

model_processed_response = await self._process_response(model_checked_response)

return model_processed_response
@@ -80,11 +86,13 @@ class ResponseGenerator:
logger.info(f"{self.current_model_type}思考,失败")
return None

async def _generate_response_with_model(self, message: MessageRecv, model: LLM_request,thinking_id:str,mode:str = "normal") -> str:
async def _generate_response_with_model(
self, message: MessageRecv, model: LLM_request, thinking_id: str, mode: str = "normal"
) -> str:
sender_name = ""

info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
sender_name = (
f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
@@ -96,45 +104,41 @@ class ResponseGenerator:
sender_name = f"用户({message.chat_stream.user_info.user_id})"

# 构建prompt
timer1 = time.time()
if mode == "normal":
prompt = await prompt_builder._build_prompt(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
elif mode == "simple":
prompt = await prompt_builder._build_prompt_simple(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
timer2 = time.time()
logger.info(f"构建{mode}prompt时间: {timer2 - timer1}秒")
with Timer() as t_build_prompt:
if mode == "normal":
prompt = await prompt_builder._build_prompt(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
elif mode == "simple":
prompt = await prompt_builder._build_prompt_simple(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
)
logger.info(f"构建{mode}prompt时间: {t_build_prompt.human_readable()}")
try:
content, reasoning_content, self.current_model_name = await model.generate_response(prompt)

info_catcher.catch_after_llm_generated(
prompt=prompt,
response=content,
reasoning_content=reasoning_content,
model_name=self.current_model_name)

prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
)

except Exception:
logger.exception("生成回复时出错")
return None

return content
async def _check_response_with_model(self, message: MessageRecv, content:str, model: LLM_request,thinking_id:str) -> str:

async def _check_response_with_model(
self, message: MessageRecv, content: str, model: LLM_request, thinking_id: str
) -> str:
_info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

sender_name = ""
if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
sender_name = (
@@ -145,36 +149,32 @@ class ResponseGenerator:
sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
else:
sender_name = f"用户({message.chat_stream.user_info.user_id})"

# 构建prompt
timer1 = time.time()
prompt = await prompt_builder._build_prompt_check_response(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
content=content
)
timer2 = time.time()
with Timer() as t_build_prompt_check:
prompt = await prompt_builder._build_prompt_check_response(
message.chat_stream,
message_txt=message.processed_plain_text,
sender_name=sender_name,
stream_id=message.chat_stream.stream_id,
content=content,
)
logger.info(f"构建check_prompt: {prompt}")
logger.info(f"构建check_prompt时间: {timer2 - timer1}秒")
logger.info(f"构建check_prompt时间: {t_build_prompt_check.human_readable()}")

try:
checked_content, reasoning_content, self.current_model_name = await model.generate_response(prompt)

# info_catcher.catch_after_llm_generated(
# prompt=prompt,
# response=content,
# reasoning_content=reasoning_content,
# model_name=self.current_model_name)

except Exception:
logger.exception("检查回复时出错")
return None

return checked_content

async def _get_emotion_tags(self, content: str, processed_plain_text: str):
src/plugins/utils/timer_calculater.py (new file, 58 lines)
@@ -0,0 +1,58 @@
from time import perf_counter
from typing import Dict, Optional

"""
计时器:用于性能计时

感谢D指导
"""
class TimerTypeError(TypeError):
    """自定义类型错误异常"""
    def __init__(self, param_name, expected_type, actual_type):
        super().__init__(
            f"Invalid type for '{param_name}'. "
            f"Expected {expected_type}, got {actual_type.__name__}"
        )

class Timer:
    def __init__(self, name: Optional[str] = None, storage: Optional[Dict[str, float]] = None):
        self.name = name # 计时器名称
        self.storage = storage # 计时结果存储
        self.elapsed = None # 计时结果

    def _validate_types(self, name, storage):
        """类型验证核心方法"""
        # 验证 name 类型
        if name is not None and not isinstance(name, str):
            raise TimerTypeError(
                param_name="name",
                expected_type="Optional[str]",
                actual_type=type(name)
            )

        # 验证 storage 类型
        if storage is not None and not isinstance(storage, dict):
            raise TimerTypeError(
                param_name="storage",
                expected_type="Optional[Dict[str, float]]",
                actual_type=type(storage)
            )
    def __enter__(self):
        self.start = perf_counter()
        return self

    def __exit__(self, *args):
        self.end = perf_counter()
        self.elapsed = self.end - self.start
        if isinstance(self.storage, dict) and self.name:
            self.storage[self.name] = self.elapsed

    def get_result(self) -> float:
        """安全获取计时结果"""
        return self.elapsed or 0.0

    def human_readable(self) -> str:
        """返回人类可读时间格式"""
        if self.elapsed >= 1:
            return f"{self.elapsed:.2f}秒"
        return f"{self.elapsed*1000:.2f}毫秒"
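For reference, a hedged usage sketch of the two ways the diff above consumes this class: a named timer that records into a shared dict, and an anonymous timer whose result is read back through `human_readable()`. Note that `elapsed` is only set in `__exit__`, so `get_result()` and `human_readable()` are meant to be called after the `with` block has finished (the `time.sleep` calls are placeholders for the awaited work being measured):

import time
from src.plugins.utils.timer_calculater import Timer

timing_results = {}

# Named timer: __exit__ stores the elapsed seconds under the given key.
with Timer("处理表情包", timing_results):
    time.sleep(0.05)  # stand-in for awaiting _handle_emoji(...)

# Anonymous timer: keep the object and format the result afterwards.
with Timer() as t_build_prompt:
    time.sleep(1.2)  # stand-in for awaiting _build_prompt(...)

print(timing_results["处理表情包"])      # float seconds, e.g. 0.05
print(t_build_prompt.get_result())       # 1.2..., or 0.0 if the timer never ran
print(t_build_prompt.human_readable())   # "1.20秒"; values under one second format as 毫秒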