From c24bf0cdcff43320902530c0db10a4532decdb78 Mon Sep 17 00:00:00 2001
From: tcmofashi
Date: Sun, 2 Mar 2025 03:45:23 +0800
Subject: [PATCH] feat: add the "reading the air" feature
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config/bot_config_toml              | 53 -----------------------------
 config/env.example                  | 25 --------------
 src/plugins/chat/config.py          |  3 +-
 src/plugins/chat/llm_generator.py   | 49 +++++++++++++++++++++++++-
 src/plugins/chat/prompt_builder.py  | 12 ++++++-
 src/plugins/chat/willing_manager.py |  2 +-
 6 files changed, 62 insertions(+), 82 deletions(-)
 delete mode 100644 config/bot_config_toml
 delete mode 100644 config/env.example

diff --git a/config/bot_config_toml b/config/bot_config_toml
deleted file mode 100644
index b5011c7f9..000000000
--- a/config/bot_config_toml
+++ /dev/null
@@ -1,53 +0,0 @@
-[database]
-host = "127.0.0.1"
-port = 27017
-name = "MegBot"
-username = "" # empty by default
-password = "" # empty by default
-auth_source = "" # empty by default
-
-[bot]
-qq = 123456 # your bot's QQ account number
-nickname = "麦麦" # the name you want the bot to be called
-
-[message]
-min_text_length = 2 # 麦麦 only replies to messages whose text is at least this many characters
-max_context_size = 15 # number of context messages 麦麦 keeps; older ones are dropped automatically
-emoji_chance = 0.2 # probability that 麦麦 sends a sticker
-
-[emoji]
-check_interval = 120
-register_interval = 10
-
-[cq_code]
-enable_pic_translate = false
-
-
-[response]
-api_using = "siliconflow" # LLM API to use: siliconflow or deepseek; siliconflow is recommended because the image-recognition API currently only supports its deepseek-vl2 model
-model_r1_probability = 0.8 # probability of choosing the R1 model for a reply
-model_v3_probability = 0.1 # probability of choosing the V3 model for a reply
-model_r1_distill_probability = 0.1 # probability of choosing the R1 distill model for a reply
-
-[memory]
-build_memory_interval = 300 # memory-building interval
-
-
-
-[others]
-enable_advance_output = true # verbose log output: true to enable, false to disable
-
-
-[groups]
-
-talk_allowed = [
-    123456,12345678
-] # groups the bot may reply in
-
-talk_frequency_down = [
-    123456,12345678
-] # groups with reduced reply frequency
-
-ban_user_id = [
-    123456,12345678
-] # QQ accounts the bot must never reply to
diff --git a/config/env.example b/config/env.example
deleted file mode 100644
index 9988d58f3..000000000
--- a/config/env.example
+++ /dev/null
@@ -1,25 +0,0 @@
-ENVIRONMENT=dev
-HOST=127.0.0.1
-PORT=8080
-
-COMMAND_START=["/"]
-
-# plugin configuration
-PLUGINS=["src2.plugins.chat"]
-
-# default configuration
-MONGODB_HOST=127.0.0.1
-MONGODB_PORT=27017
-DATABASE_NAME=MegBot
-MONGODB_USERNAME = "" # empty by default
-MONGODB_PASSWORD = "" # empty by default
-MONGODB_AUTH_SOURCE = "" # empty by default
-
-# API settings
-SILICONFLOW_KEY=
-SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/
-DEEP_SEEK_KEY=
-DEEP_SEEK_BASE_URL=https://api.deepseek.com/v1
-
-
-
diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py
index 05d492789..76dde7ebc 100644
--- a/src/plugins/chat/config.py
+++ b/src/plugins/chat/config.py
@@ -16,7 +16,7 @@ class BotConfig:
     """Bot configuration class"""
 
     # basic settings
-    MONGODB_HOST: str = "127.0.0.1"
+    MONGODB_HOST: str = "mongodb"
     MONGODB_PORT: int = 27017
     DATABASE_NAME: str = "MegBot"
     MONGODB_USERNAME: Optional[str] = None  # empty by default
@@ -47,6 +47,7 @@ class BotConfig:
     MODEL_R1_DISTILL_PROBABILITY: float = 0.1  # R1 distill model probability
 
     enable_advance_output: bool = False  # whether to enable verbose output
+    enable_kuuki_read: bool = True  # whether to enable the "reading the air" feature
 
     @staticmethod
     def get_default_config_path() -> str:
diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 2ea4d7f24..137730ad5 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -91,7 +91,7 @@ class LLMResponseGenerator:
             relationship_value = 0.0
 
         # build the prompt
-        prompt = prompt_builder._build_prompt(
+        prompt, prompt_check = prompt_builder._build_prompt(
             message_txt=message.processed_plain_text,
             sender_name=sender_name,
             relationship_value=relationship_value,
@@ -106,6 +106,14 @@ class LLMResponseGenerator:
             "max_tokens": 1024,
             "temperature": 0.7
         }
+
+        default_params_check = {
+            "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            "messages": [{"role": "user", "content": prompt_check}],
+            "stream": False,
+            "max_tokens": 1024,
+            "temperature": 0.7
+        }
 
         # update parameters
         if model_params:
@@ -114,7 +122,43 @@ class LLMResponseGenerator:
         def create_completion():
             return self.client.chat.completions.create(**default_params)
 
+        def create_completion_check():
+            return self.client.chat.completions.create(**default_params_check)
+
         loop = asyncio.get_event_loop()
+
+        # "reading the air" module: ask a model whether replying is appropriate at all
+        reasoning_content_check = ''
+        content_check = ''
+        if global_config.enable_kuuki_read:
+            response_check = await loop.run_in_executor(None, create_completion_check)
+            if response_check:
+                reasoning_content_check = ""
+                if hasattr(response_check.choices[0].message, "reasoning"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning or reasoning_content_check
+                elif hasattr(response_check.choices[0].message, "reasoning_content"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning_content or reasoning_content_check
+                content_check = response_check.choices[0].message.content
+                print(f"\033[1;32m[读空气]\033[0m 读空气结果为{content_check}")
+                if 'yes' not in content_check.lower():
+                    self.db.db.reasoning_logs.insert_one({
+                        'time': time.time(),
+                        'group_id': message.group_id,
+                        'user': sender_name,
+                        'message': message.processed_plain_text,
+                        'model': model_name,
+                        'reasoning_check': reasoning_content_check,
+                        'response_check': content_check,
+                        'reasoning': "",
+                        'response': "",
+                        'prompt': prompt,
+                        'prompt_check': prompt_check,
+                        'model_params': default_params
+                    })
+                    return None
+
+
+
         response = await loop.run_in_executor(None, create_completion)
 
         # check the response content
@@ -142,9 +186,12 @@ class LLMResponseGenerator:
             'user': sender_name,
             'message': message.processed_plain_text,
             'model': model_name,
+            'reasoning_check': reasoning_content_check,
+            'response_check': content_check,
             'reasoning': reasoning_content,
             'response': content,
             'prompt': prompt,
+            'prompt_check': prompt_check,
             'model_params': default_params
         })
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index da9037cfa..215d926a9 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -186,8 +186,18 @@ class PromptBuilder:
         prompt += f"{prompt_personality}\n"
         prompt += f"{prompt_ger}\n"
         prompt += f"{extra_info}\n"
+
+        activate_prompt_check = f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。"
+        prompt_personality_check = ''
+        extra_check_info = f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息一定要回复,如果自己正在和别人聊天一定要回复,其他话题如果合适搭话也可以回复,如果认为应该回复请输出yes,否则输出no,请注意是决定是否需要回复,而不是编写回复内容,除了yes和no不要输出任何回复内容。"
+        if personality_choice < 4/6:  # first personality
+            prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME},曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧,你正在浏览qq群,{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+        elif personality_choice < 1:  # second personality
+            prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME},是一个女大学生,你有黑色头发,你会刷小红书, 你正在浏览qq群,{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+
+        prompt_check_if_response = f"{prompt_info}\n{prompt_date}\n{chat_talking_prompt}\n{prompt_personality_check}"
 
-        return prompt
+        return prompt, prompt_check_if_response
 
     def get_prompt_info(self,message:str,threshold:float):
         related_info = ''
diff --git a/src/plugins/chat/willing_manager.py b/src/plugins/chat/willing_manager.py
index 037c2d517..8f3734a4f 100644
--- a/src/plugins/chat/willing_manager.py
+++ b/src/plugins/chat/willing_manager.py
@@ -58,7 +58,7 @@ class WillingManager:
         if group_id in config.talk_frequency_down_groups:
             reply_probability = reply_probability / 3.5
 
-        if is_mentioned_bot and user_id == int(1026294844):
+        if is_mentioned_bot and user_id == int(964959351):
             reply_probability = 1
 
         return reply_probability
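
Note on the added flow: llm_generator.py now makes two model calls per candidate
message. A cheap gate call ("reading the air") sends prompt_check, whose text
instructs the model to output nothing except yes or no; only when the verdict
contains "yes" does the bot pay for the real reply call with the normal prompt.
A minimal sketch of that gate-then-reply pattern, assuming an OpenAI-compatible
client; the helper names (_complete, generate_if_appropriate, CHECK_MODEL) are
illustrative and do not appear in the patch:

    import asyncio
    from openai import OpenAI

    # gate model used in the patch's default_params_check
    CHECK_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

    client = OpenAI()  # assumes API key and base URL are configured elsewhere

    def _complete(model: str, prompt: str) -> str:
        # synchronous completion, mirroring create_completion_check() in the diff
        resp = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            stream=False,
            max_tokens=1024,
            temperature=0.7,
        )
        return resp.choices[0].message.content or ""

    async def generate_if_appropriate(reply_model: str, prompt: str, prompt_check: str):
        loop = asyncio.get_event_loop()
        # stage 1: "read the air" with the yes/no check prompt
        verdict = await loop.run_in_executor(None, _complete, CHECK_MODEL, prompt_check)
        if "yes" not in verdict.lower():
            return None  # the room says: stay silent
        # stage 2: generate the actual reply
        return await loop.run_in_executor(None, _complete, reply_model, prompt)

One caveat the patch works around via prompting: the gate keys on the substring
"yes", so any verdict that happens to contain "yes" passes, which is why
extra_check_info forbids the model from emitting anything beyond yes or no.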