feat: add the read-the-air (读空气) feature
@@ -1,53 +0,0 @@
-[database]
-host = "127.0.0.1"
-port = 27017
-name = "MegBot"
-username = "" # empty by default
-password = "" # empty by default
-auth_source = "" # empty by default
-
-[bot]
-qq = 123456 # your bot's QQ number
-nickname = "麦麦" # the name you want the bot to be called
-
-[message]
-min_text_length = 2 # 麦麦 only answers messages whose text is at least this many characters
-max_context_size = 15 # number of context messages 麦麦 keeps; anything beyond is dropped
-emoji_chance = 0.2 # probability that 麦麦 uses a sticker
-
-[emoji]
-check_interval = 120
-register_interval = 10
-
-[cq_code]
-enable_pic_translate = false
-
-
-[response]
-api_using = "siliconflow" # which LLM API to use: siliconflow or deepseek; siliconflow is recommended because the image-recognition API currently only supports its deepseek-vl2 model
-model_r1_probability = 0.8 # probability of picking the R1 model when 麦麦 answers
-model_v3_probability = 0.1 # probability of picking the V3 model when 麦麦 answers
-model_r1_distill_probability = 0.1 # probability of picking the R1 distilled model when 麦麦 answers
-
-[memory]
-build_memory_interval = 300 # memory-building interval
-
-
-
-[others]
-enable_advance_output = true # prints more logs when on; true = on, false = off
-
-
-[groups]
-
-talk_allowed = [
-123456,12345678
-] # groups the bot may reply in
-
-talk_frequency_down = [
-123456,12345678
-] # groups with reduced reply frequency
-
-ban_user_id = [
-123456,12345678
-] # QQ IDs the bot must never reply to
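For reference, a minimal sketch of how the deleted TOML above could be loaded — assuming Python 3.11+ (stdlib tomllib) and a config.toml filename, neither of which this diff confirms:

import tomllib  # stdlib TOML parser, Python 3.11+

with open("config.toml", "rb") as f:  # tomllib requires a binary file handle
    cfg = tomllib.load(f)

print(cfg["database"]["host"])        # "127.0.0.1"
print(cfg["groups"]["talk_allowed"])  # [123456, 12345678]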
@@ -1,25 +0,0 @@
-ENVIRONMENT=dev
-HOST=127.0.0.1
-PORT=8080
-
-COMMAND_START=["/"]
-
-# plugin configuration
-PLUGINS=["src2.plugins.chat"]
-
-# default configuration
-MONGODB_HOST=127.0.0.1
-MONGODB_PORT=27017
-DATABASE_NAME=MegBot
-MONGODB_USERNAME = "" # empty by default
-MONGODB_PASSWORD = "" # empty by default
-MONGODB_AUTH_SOURCE = "" # empty by default
-
-# API settings
-SILICONFLOW_KEY=
-SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/
-DEEP_SEEK_KEY=
-DEEP_SEEK_BASE_URL=https://api.deepseek.com/v1
-
-
-
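A minimal sketch of reading the deleted .env values above, assuming the python-dotenv package; the project may instead rely on its bot framework's own .env handling:

import os
from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # copies .env entries into os.environ

mongo_host = os.getenv("MONGODB_HOST", "127.0.0.1")
mongo_port = int(os.getenv("MONGODB_PORT", "27017"))
api_key = os.getenv("SILICONFLOW_KEY")  # empty in the template above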
@@ -16,7 +16,7 @@ class BotConfig:
     """Bot configuration class"""
 
     # basic settings
-    MONGODB_HOST: str = "127.0.0.1"
+    MONGODB_HOST: str = "mongodb"
     MONGODB_PORT: int = 27017
     DATABASE_NAME: str = "MegBot"
     MONGODB_USERNAME: Optional[str] = None  # empty by default
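Switching MONGODB_HOST from 127.0.0.1 to "mongodb" suggests the database is now resolved by a container-network hostname (e.g. a Docker Compose service name) rather than the local machine. A sketch of how these fields would reach the driver, assuming pymongo:

from pymongo import MongoClient

# "mongodb" resolves via the container network's DNS; outside a container
# you would pass "127.0.0.1". Names mirror BotConfig above.
client = MongoClient(host="mongodb", port=27017)
db = client["MegBot"]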
@@ -47,6 +47,7 @@ class BotConfig:
     MODEL_R1_DISTILL_PROBABILITY: float = 0.1  # probability of the R1 distilled model
 
     enable_advance_output: bool = False  # enable verbose output
+    enable_kuuki_read: bool = True  # enable the read-the-air feature
 
     @staticmethod
     def get_default_config_path() -> str:
@@ -91,7 +91,7 @@ class LLMResponseGenerator:
         relationship_value = 0.0
 
         # build the prompt
-        prompt = prompt_builder._build_prompt(
+        prompt, prompt_check = prompt_builder._build_prompt(
             message_txt=message.processed_plain_text,
             sender_name=sender_name,
             relationship_value=relationship_value,
@@ -106,6 +106,14 @@ class LLMResponseGenerator:
             "max_tokens": 1024,
             "temperature": 0.7
         }
 
+        default_params_check = {
+            "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+            "messages": [{"role": "user", "content": prompt_check}],
+            "stream": False,
+            "max_tokens": 1024,
+            "temperature": 0.7
+        }
+
         # update the parameters
         if model_params:
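The check request duplicates default_params wholesale, pinning only the model (the cheaper R1 distill) and the prompt. A possible factoring — make_params is a hypothetical helper, not in the commit:

def make_params(model: str, prompt_text: str) -> dict:
    # Hypothetical helper: the two request dicts differ only in model and prompt.
    return {
        "model": model,
        "messages": [{"role": "user", "content": prompt_text}],
        "stream": False,
        "max_tokens": 1024,
        "temperature": 0.7,
    }

default_params_check = make_params("deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", prompt_check)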
@@ -114,7 +122,43 @@ class LLMResponseGenerator:
         def create_completion():
             return self.client.chat.completions.create(**default_params)
 
+        def create_completion_check():
+            return self.client.chat.completions.create(**default_params_check)
+
         loop = asyncio.get_event_loop()
 
+        # read-the-air module
+        reasoning_content_check = ''
+        content_check = ''
+        if global_config.enable_kuuki_read:
+            response_check = await loop.run_in_executor(None, create_completion_check)
+            if response_check:
+                reasoning_content_check = ""
+                if hasattr(response_check.choices[0].message, "reasoning"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning or reasoning_content_check
+                elif hasattr(response_check.choices[0].message, "reasoning_content"):
+                    reasoning_content_check = response_check.choices[0].message.reasoning_content or reasoning_content_check
+                content_check = response_check.choices[0].message.content
+                print(f"\033[1;32m[读空气]\033[0m read-the-air verdict: {content_check}")
+                if 'yes' not in content_check.lower():
+                    self.db.db.reasoning_logs.insert_one({
+                        'time': time.time(),
+                        'group_id': message.group_id,
+                        'user': sender_name,
+                        'message': message.processed_plain_text,
+                        'model': model_name,
+                        'reasoning_check': reasoning_content_check,
+                        'response_check': content_check,
+                        'reasoning': "",
+                        'response': "",
+                        'prompt': prompt,
+                        'prompt_check': prompt_check,
+                        'model_params': default_params
+                    })
+                    return None
+
         response = await loop.run_in_executor(None, create_completion)
 
         # check the response content
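Condensed, the gate above amounts to the following — a sketch assuming an OpenAI-compatible client, with should_reply as a hypothetical name:

def should_reply(client, params_check: dict) -> bool:
    # Ask the check model for a yes/no verdict, then gate on the answer.
    response = client.chat.completions.create(**params_check)
    verdict = (response.choices[0].message.content or "").lower()
    # Substring match, as in the commit: any "yes" anywhere counts, so a
    # verdict like "yes and no" would still pass.
    return "yes" in verdict

When the verdict is negative, the commit also logs the suppressed exchange to reasoning_logs before returning None, so declined messages stay auditable.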
@@ -142,9 +186,12 @@ class LLMResponseGenerator:
             'user': sender_name,
             'message': message.processed_plain_text,
             'model': model_name,
+            'reasoning_check': reasoning_content_check,
+            'response_check': content_check,
             'reasoning': reasoning_content,
             'response': content,
             'prompt': prompt,
+            'prompt_check': prompt_check,
             'model_params': default_params
         })
 
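With these fields in place, suppressed and answered messages share one collection. A sketch of inspecting recent suppressions, assuming pymongo and the connection values above:

from pymongo import MongoClient

logs = MongoClient(host="mongodb", port=27017)["MegBot"]["reasoning_logs"]
# Suppressed entries were written with an empty 'response' field.
for doc in logs.find({"response": ""}).sort("time", -1).limit(5):
    print(doc["group_id"], doc["response_check"], doc["message"])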
@@ -186,8 +186,18 @@ class PromptBuilder:
         prompt += f"{prompt_personality}\n"
         prompt += f"{prompt_ger}\n"
         prompt += f"{extra_info}\n"
 
+        activate_prompt_check = f"The above is the ongoing group chat. The message from the user nicknamed '{sender_name}' — {message_txt} — caught your attention. You and they {relation_prompt}, and you want to {relation_prompt_2}, but this may not be the right moment, so decide whether to respond to this message."
+        prompt_personality_check = ''
+        extra_check_info = f"On top of following the chat, weigh the group's mood: reply actively to topics involving {global_config.BOT_NICKNAME}, always reply to messages that @ you, always reply if you are already talking with someone, and join other topics only when it fits. Output yes if you should reply, otherwise output no. You are deciding whether to reply, not writing the reply; output nothing besides yes or no."
+        if personality_choice < 4/6:  # first persona
+            prompt_personality_check = f'''Your screen name is {global_config.BOT_NICKNAME}. You were once a female college student studying geology and now study psychology and brain science; you browse Tieba. You are reading a QQ group. {promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+        elif personality_choice < 1:  # second persona
+            prompt_personality_check = f'''Your screen name is {global_config.BOT_NICKNAME}, a female college student with black hair who browses Xiaohongshu. You are reading a QQ group. {promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
+
+        prompt_check_if_response = f"{prompt_info}\n{prompt_date}\n{chat_talking_prompt}\n{prompt_personality_check}"
+
-        return prompt
+        return prompt, prompt_check_if_response
 
     def get_prompt_info(self, message: str, threshold: float):
         related_info = ''
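Because _build_prompt now returns a 2-tuple, every call site has to unpack it, as the generator hunk above does:

# The new call contract; a caller left as `prompt = prompt_builder._build_prompt(...)`
# would silently bind the whole tuple to prompt.
prompt, prompt_check = prompt_builder._build_prompt(
    message_txt=message.processed_plain_text,
    sender_name=sender_name,
    relationship_value=relationship_value,
)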
@@ -58,7 +58,7 @@ class WillingManager:
         if group_id in config.talk_frequency_down_groups:
             reply_probability = reply_probability / 3.5
 
-        if is_mentioned_bot and user_id == int(1026294844):
+        if is_mentioned_bot and user_id == int(964959351):
             reply_probability = 1
 
         return reply_probability
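This hunk swaps one hardcoded QQ number for another; the privileged always-reply ID lives in code rather than config. A possible lift into configuration — always_reply_user_ids is a hypothetical field, not part of the commit:

# Hypothetical refactor: read privileged IDs from config instead of a literal.
always_reply_ids = getattr(config, "always_reply_user_ids", [])

if is_mentioned_bot and user_id in always_reply_ids:
    reply_probability = 1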