diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py
index d746059e7..33fe01095 100644
--- a/src/plugins/heartFC_chat/heartFC_chat.py
+++ b/src/plugins/heartFC_chat/heartFC_chat.py
@@ -654,8 +654,6 @@ class HeartFChatting:
                 logger.warning(f"{log_prefix}[Replier-{thinking_id}] LLM生成了一个空回复集。")
                 return None
 
-            # --- 准备并返回结果 --- #
-            # logger.info(f"{log_prefix}[Replier-{thinking_id}] 成功生成了回复集: {' '.join(response_set)[:50]}...")
             return response_set
 
         except Exception as e:
diff --git a/src/plugins/heartFC_chat/heartFC_generator.py b/src/plugins/heartFC_chat/heartFC_generator.py
index cbf050bd9..464e94e91 100644
--- a/src/plugins/heartFC_chat/heartFC_generator.py
+++ b/src/plugins/heartFC_chat/heartFC_generator.py
@@ -47,10 +47,6 @@ class HeartFCGenerator:
     ) -> Optional[List[str]]:
         """根据当前模型类型选择对应的生成函数"""
 
-        logger.info(
-            f"思考:{message.processed_plain_text[:30] + '...' if len(message.processed_plain_text) > 30 else message.processed_plain_text}"
-        )
-
         arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
 
         with Timer() as t_generate_response:
@@ -80,27 +76,25 @@ class HeartFCGenerator:
         model: LLMRequest,
         thinking_id: str,
     ) -> str:
-        sender_name = ""
-
         info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
 
-        sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"
-
         with Timer() as t_build_prompt:
             prompt = await prompt_builder.build_prompt(
                 build_mode="focus",
                 reason=reason,
                 current_mind_info=current_mind_info,
                 structured_info=structured_info,
-                message_txt=message.processed_plain_text,
-                sender_name=sender_name,
+                message_txt="",
+                sender_name="",
                 chat_stream=message.chat_stream,
             )
-        logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
+        # logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")
 
         try:
             content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
 
+            logger.info(f"\nprompt:{prompt}\n生成回复{content}\n")
+
             info_catcher.catch_after_llm_generated(
                 prompt=prompt, response=content, reasoning_content=reasoning_content, model_name=self.current_model_name
             )
diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
index c5b04ed93..880d0a27d 100644
--- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py
+++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
@@ -25,13 +25,13 @@ def init_prompt():
 {structured_info}
 {chat_target}
 {chat_talking_prompt}
-现在"{sender_name}"说的:{message_txt}。引起了你的注意,你想要在群里发言发言或者回复这条消息。\n
+现在你想要在群里发言或者回复。\n
 你的网名叫{bot_name},{prompt_personality} {prompt_identity}。
 你正在{chat_target_2},现在请你读读之前的聊天记录,然后给出日常且口语化的回复,平淡一些,
 你刚刚脑子里在想:
 {current_mind_info}
 {reason}
-回复尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。请一次只回复一个话题,不要同时回复多个人。{prompt_ger}
+回复尽量简短一些。请注意把握聊天内容,不要回复的太有条理,可以有个性。请一次只回复一个话题,不要同时回复多个人,不用指出你回复的是谁。{prompt_ger}
 请回复的平淡一些,简短一些,说中文,不要刻意突出自身学科背景,尽量不要说你说过的话 ,注意只输出回复内容。
 {moderation_prompt}。注意:不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。""",
         "heart_flow_prompt",
@@ -95,12 +95,12 @@ class PromptBuilder:
 
         elif build_mode == "focus":
             return await self._build_prompt_focus(
-                reason, current_mind_info, structured_info, chat_stream, message_txt, sender_name
+                reason, current_mind_info, structured_info, chat_stream,
             )
         return None
 
     async def _build_prompt_focus(
-        self, reason, current_mind_info, structured_info, chat_stream, message_txt: str, sender_name: str = "某人"
+        self, reason, current_mind_info, structured_info, chat_stream
     ) -> tuple[str, str]:
         individuality = Individuality.get_instance()
         prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
@@ -128,26 +128,6 @@ class PromptBuilder:
             read_mark=0.0,
         )
 
-        # 关键词检测与反应
-        keywords_reaction_prompt = ""
-        for rule in global_config.keywords_reaction_rules:
-            if rule.get("enable", False):
-                if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
-                    logger.info(
-                        f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}"
-                    )
-                    keywords_reaction_prompt += rule.get("reaction", "") + ","
-                else:
-                    for pattern in rule.get("regex", []):
-                        result = pattern.search(message_txt)
-                        if result:
-                            reaction = rule.get("reaction", "")
-                            for name, content in result.groupdict().items():
-                                reaction = reaction.replace(f"[{name}]", content)
-                            logger.info(f"匹配到以下正则表达式:{pattern},触发反应:{reaction}")
-                            keywords_reaction_prompt += reaction + ","
-                            break
-
         # 中文高手(新加的好玩功能)
         prompt_ger = ""
         if random.random() < 0.04:
@@ -164,8 +144,6 @@ class PromptBuilder:
             if chat_in_group
             else await global_prompt_manager.get_prompt_async("chat_target_private1"),
             chat_talking_prompt=chat_talking_prompt,
-            sender_name=sender_name,
-            message_txt=message_txt,
             bot_name=global_config.BOT_NICKNAME,
             prompt_personality=prompt_personality,
             prompt_identity=prompt_identity,
@@ -174,7 +152,6 @@ class PromptBuilder:
             else await global_prompt_manager.get_prompt_async("chat_target_private2"),
             current_mind_info=current_mind_info,
             reason=reason,
-            keywords_reaction_prompt=keywords_reaction_prompt,
             prompt_ger=prompt_ger,
             moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
         )