From 136a8078964e28e33d11e560b8a0b0cf9ff86ec3 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Sun, 2 Mar 2025 03:45:23 +0800
Subject: [PATCH] feat: add the kuuki-read ("read the air") feature
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/plugins/chat/config.py          |  1 +
 src/plugins/chat/llm_generator.py   | 49 ++++++++++++++++++++++++++++-
 src/plugins/chat/prompt_builder.py  | 12 ++++++-
 src/plugins/chat/willing_manager.py |  2 +-
 4 files changed, 61 insertions(+), 3 deletions(-)

diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 05d492789..efaa96523 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -47,6 +47,7 @@ class BotConfig:
     MODEL_R1_DISTILL_PROBABILITY: float = 0.1  # probability of using the R1-distilled model
 
     enable_advance_output: bool = False  # whether to enable advanced output
+    enable_kuuki_read: bool = True  # whether to enable the kuuki-read ("read the air") check
 
     @staticmethod
     def get_default_config_path() -> str:
diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 2ea4d7f24..137730ad5 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -91,7 +91,7 @@ class LLMResponseGenerator:
         relationship_value = 0.0
 
         # build the prompt
-        prompt = prompt_builder._build_prompt(
+        prompt, prompt_check = prompt_builder._build_prompt(
             message_txt=message.processed_plain_text,
             sender_name=sender_name,
             relationship_value=relationship_value,
@@ -106,6 +106,14 @@
             "max_tokens": 1024,
             "temperature": 0.7
         }
+
+        default_params_check = {
+            "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            "messages": [{"role": "user", "content": prompt_check}],
+            "stream": False,
+            "max_tokens": 1024,
+            "temperature": 0.7
+        }
 
         # update parameters
         if model_params:
@@ -114,7 +122,43 @@
         def create_completion():
             return self.client.chat.completions.create(**default_params)
 
+        def create_completion_check():
+            return self.client.chat.completions.create(**default_params_check)
+
         loop = asyncio.get_event_loop()
+
+        # kuuki-read module: decide whether to reply at all
+        reasoning_content_check = ''
+        content_check = ''
+        if global_config.enable_kuuki_read:
+            response_check = await loop.run_in_executor(None, create_completion_check)
+            if response_check:
+                reasoning_content_check = ""
+                if hasattr(response_check.choices[0].message, "reasoning"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning or reasoning_content_check
+                elif hasattr(response_check.choices[0].message, "reasoning_content"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning_content or reasoning_content_check
+                content_check = response_check.choices[0].message.content or ""
+                print(f"\033[1;32m[kuuki read]\033[0m check verdict: {content_check}")
+                if 'yes' not in content_check.lower():
+                    self.db.db.reasoning_logs.insert_one({
+                        'time': time.time(),
+                        'group_id': message.group_id,
+                        'user': sender_name,
+                        'message': message.processed_plain_text,
+                        'model': model_name,
+                        'reasoning_check': reasoning_content_check,
+                        'response_check': content_check,
+                        'reasoning': "",
+                        'response': "",
+                        'prompt': prompt,
+                        'prompt_check': prompt_check,
+                        'model_params': default_params
+                    })
+                    return None
+
         response = await loop.run_in_executor(None, create_completion)
 
         # check the response content
@@ -142,9 +186,12 @@
             'user': sender_name,
             'message': message.processed_plain_text,
             'model': model_name,
+            'reasoning_check': reasoning_content_check,
+            'response_check': content_check,
             'reasoning': reasoning_content,
             'response': content,
             'prompt': prompt,
+            'prompt_check': prompt_check,
             'model_params': default_params
         })
 
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index da9037cfa..215d926a9 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -186,8 +186,18 @@
             prompt += f"{prompt_personality}\n"
         prompt += f"{prompt_ger}\n"
         prompt += f"{extra_info}\n"
+
+        activate_prompt_check = f"The above is the ongoing group chat. A user nicknamed '{sender_name}' said: {message_txt}. It caught your attention; your relationship with them is {relation_prompt}, and you want to {relation_prompt_2}, but this may not be the right moment. Decide whether to respond to this message."
+        prompt_personality_check = ''
+        extra_check_info = f"Judge on the basis of the chat content and the overall mood of the group. For example, respond actively to topics related to {global_config.BOT_NICKNAME}; always reply to messages that @ you; always reply if you are already talking with someone; you may also reply to other topics when chiming in seems appropriate. Output yes if you think you should reply, otherwise output no. Note that you are only deciding whether to reply, not writing the reply itself; output nothing besides yes or no."
+        if personality_choice < 4/6:  # persona 1
+            prompt_personality_check = f'''Your online nickname is {global_config.BOT_NICKNAME}. You used to be a female college student majoring in geology and now study psychology and brain science; you browse Tieba, and you are currently reading a QQ group. {promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+        elif personality_choice < 1:  # persona 2
+            prompt_personality_check = f'''Your online nickname is {global_config.BOT_NICKNAME}. You are a female college student with black hair; you browse Xiaohongshu, and you are currently reading a QQ group. {promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+
+        prompt_check_if_response = f"{prompt_info}\n{prompt_date}\n{chat_talking_prompt}\n{prompt_personality_check}"
 
-        return prompt
+        return prompt, prompt_check_if_response
 
     def get_prompt_info(self,message:str,threshold:float):
         related_info = ''
diff --git a/src/plugins/chat/willing_manager.py b/src/plugins/chat/willing_manager.py
index 037c2d517..8f3734a4f 100644
--- a/src/plugins/chat/willing_manager.py
+++ b/src/plugins/chat/willing_manager.py
@@ -58,7 +58,7 @@
         if group_id in config.talk_frequency_down_groups:
             reply_probability = reply_probability / 3.5
 
-        if is_mentioned_bot and user_id == int(1026294844):
+        if is_mentioned_bot and user_id == int(964959351):
             reply_probability = 1
 
         return reply_probability
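
A note on the gating pattern the llm_generator.py change introduces: before paying for a full reply generation, the bot first runs a cheap secondary completion whose prompt demands a bare yes/no verdict, and it logs the decision and bails out when the verdict contains no "yes". The sketch below is a minimal, self-contained illustration of that gate, not the repository's actual wiring: `client` stands in for any OpenAI-compatible client, `check_prompt` for the second value returned by `prompt_builder._build_prompt`, and the small `max_tokens` is an assumption chosen because the verdict is a single word.

    import asyncio

    async def should_reply(client, check_prompt: str) -> bool:
        """Ask a secondary model for a bare yes/no verdict before replying."""
        loop = asyncio.get_running_loop()
        # The OpenAI-style client is synchronous, so run it in a thread
        # executor, mirroring how the patch calls create_completion_check.
        response = await loop.run_in_executor(
            None,
            lambda: client.chat.completions.create(
                model="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
                messages=[{"role": "user", "content": check_prompt}],
                stream=False,
                max_tokens=16,   # assumed small budget: the verdict is one word
                temperature=0.7,
            ),
        )
        verdict = (response.choices[0].message.content or "").lower()
        # Same test as the patch: no literal "yes" in the verdict means skip.
        return "yes" in verdict

The naive substring test only works because the check prompt forbids any output besides yes or no; a chattier verdict that merely mentioned a word like "yesterday" would otherwise pass, which is why the prompt built in prompt_builder.py ends with that constraint.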