fix:修复神秘魔法士 (fix: repair the "mysterious mage" behavior)

Author: SengokuCola
Date:   2025-04-24 15:02:17 +08:00
parent 3c736f22e4
commit 5e7131ed00
3 changed files with 9 additions and 40 deletions

Changed file 1 of 3

@@ -654,8 +654,6 @@ class HeartFChatting:
                 logger.warning(f"{log_prefix}[Replier-{thinking_id}] LLM生成了一个空回复集。")
                 return None
-            # --- 准备并返回结果 --- #
-            # logger.info(f"{log_prefix}[Replier-{thinking_id}] 成功生成了回复集: {' '.join(response_set)[:50]}...")
             return response_set
         except Exception as e:

Changed file 2 of 3

@@ -47,10 +47,6 @@ class HeartFCGenerator:
     ) -> Optional[List[str]]:
         """根据当前模型类型选择对应的生成函数"""
-        logger.info(
-            f"思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
-        )
         arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
         with Timer() as t_generate_response:
@@ -80,27 +76,25 @@
         model: LLMRequest,
         thinking_id: str,
     ) -> str:
-        sender_name = ""
         info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
-        sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
         with Timer() as t_build_prompt:
             prompt = await prompt_builder.build_prompt(
                 build_mode="focus",
                 reason=reason,
                 current_mind_info=current_mind_info,
                 structured_info=structured_info,
-                message_txt=message.processed_plain_text,
-                sender_name=sender_name,
+                message_txt="",
+                sender_name="",
                 chat_stream=message.chat_stream,
             )
-        logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
+        # logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
         try:
             content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
-            logger.info(f"\nprompt:{prompt}\n生成回复{content}\n")
             info_catcher.catch_after_llm_generated(
                 prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
             )

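In the hunk above, HeartFCGenerator keeps passing the message_txt and sender_name keywords to build_prompt, but in focus mode both are now empty strings, and the "<platform:user_id:nickname:cardname>" sender tag is no longer built at all; only the chat stream, current mind info, structured info, and reason feed the prompt. A minimal, self-contained sketch of the resulting call shape (PromptBuilderSketch is a toy stand-in for illustration, not the project's PromptBuilder):

import asyncio
from typing import Optional


class PromptBuilderSketch:
    """Toy stand-in that only mirrors the call shape shown in the diff."""

    async def build_prompt(
        self,
        build_mode: str,
        reason: str,
        current_mind_info: str,
        structured_info: str,
        chat_stream: object,
        message_txt: str = "",
        sender_name: str = "",
    ) -> Optional[str]:
        if build_mode == "focus":
            # message_txt / sender_name are still accepted so existing callers keep
            # working, but the focus builder below no longer uses them.
            return await self._build_prompt_focus(reason, current_mind_info, structured_info, chat_stream)
        return None

    async def _build_prompt_focus(self, reason, current_mind_info, structured_info, chat_stream) -> str:
        # The real builder also renders chat history from chat_stream; skipped here.
        return f"mind: {current_mind_info}\nreason: {reason}\ninfo: {structured_info}"


if __name__ == "__main__":
    builder = PromptBuilderSketch()
    print(asyncio.run(builder.build_prompt("focus", "a reply seems warranted", "pondering the chat", "{}", None)))
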
Changed file 3 of 3

@@ -25,13 +25,13 @@ def init_prompt():
 {structured_info}
 {chat_target}
 {chat_talking_prompt}
-现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息\n
+现在你想要在群里发言或者回复。\n
 你的网名叫{bot_name}{prompt_personality} {prompt_identity}
 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
 你刚刚脑子里在想:
 {current_mind_info}
 {reason}
-回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。请一次只回复一个话题,不要同时回复多个人。{prompt_ger}
+回复尽量简短一些。请注意把握聊天内容,不要回复的太有条理,可以有个性。请一次只回复一个话题,不要同时回复多个人,不用指出你回复的是谁{prompt_ger}
 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 ,注意只输出回复内容。
 {moderation_prompt}。注意:不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 )。""",
         "heart_flow_prompt",
@@ -95,12 +95,12 @@ class PromptBuilder:
         elif build_mode == "focus":
             return await self._build_prompt_focus(
-                reason, current_mind_info, structured_info, chat_stream, message_txt, sender_name
+                reason, current_mind_info, structured_info, chat_stream,
             )
         return None

     async def _build_prompt_focus(
-        self, reason, current_mind_info, structured_info, chat_stream, message_txt: str, sender_name: str = "某人"
+        self, reason, current_mind_info, structured_info, chat_stream
     ) -> tuple[str, str]:
         individuality = Individuality.get_instance()
         prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
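The template lines trimmed in the first hunk of this file and the format arguments removed in the hunks below have to move together. With plain str.format (the project routes this through its own prompt manager, which may behave differently), extra keyword arguments are silently ignored, but a placeholder left in the template without a matching argument raises KeyError, so dropping {sender_name}, {message_txt}, and {keywords_reaction_prompt} from the template is safe on its own, while keeping them and dropping the arguments would not be. A short illustration using trimmed copies of the template text:

# Behaviour of plain str.format; the project's prompt manager may differ.
template_new = "现在你想要在群里发言或者回复。\n你刚刚脑子里在想:\n{current_mind_info}\n{reason}"

# Extra keyword arguments are ignored, so a caller that still passes sender_name
# keeps working against the trimmed template.
print(template_new.format(current_mind_info="...", reason="...", sender_name="unused"))

# The reverse mismatch fails: a leftover placeholder with no argument raises KeyError.
template_old = '现在"{sender_name}"说的:{message_txt}。\n{current_mind_info}'
try:
    template_old.format(current_mind_info="...")
except KeyError as missing_placeholder:
    print(f"missing placeholder: {missing_placeholder}")
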
@@ -128,26 +128,6 @@ class PromptBuilder:
             read_mark=0.0,
         )
-        # 关键词检测与反应
-        keywords_reaction_prompt = ""
-        for rule in global_config.keywords_reaction_rules:
-            if rule.get("enable", False):
-                if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
-                    logger.info(
-                        f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
-                    )
-                    keywords_reaction_prompt += rule.get("reaction", "") + ""
-                else:
-                    for pattern in rule.get("regex", []):
-                        result = pattern.search(message_txt)
-                        if result:
-                            reaction = rule.get("reaction", "")
-                            for name, content in result.groupdict().items():
-                                reaction = reaction.replace(f"[{name}]", content)
-                            logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
-                            keywords_reaction_prompt += reaction + ""
-                            break
         # 中文高手(新加的好玩功能)
         prompt_ger = ""
         if random.random() < 0.04:
@@ -164,8 +144,6 @@ class PromptBuilder:
             if chat_in_group
             else await global_prompt_manager.get_prompt_async("chat_target_private1"),
             chat_talking_prompt=chat_talking_prompt,
-            sender_name=sender_name,
-            message_txt=message_txt,
             bot_name=global_config.BOT_NICKNAME,
             prompt_personality=prompt_personality,
             prompt_identity=prompt_identity,
@@ -174,7 +152,6 @@ class PromptBuilder:
             else await global_prompt_manager.get_prompt_async("chat_target_private2"),
             current_mind_info=current_mind_info,
             reason=reason,
-            keywords_reaction_prompt=keywords_reaction_prompt,
             prompt_ger=prompt_ger,
             moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
         )