Merge remote-tracking branch 'upstream/debug' into debug

This commit is contained in:
Rikki
2025-03-09 05:28:11 +08:00
46 changed files with 730 additions and 305 deletions

View File

@@ -1,6 +1,8 @@
from pymongo import MongoClient
from typing import Optional
from pymongo import MongoClient
class Database:
_instance: Optional["Database"] = None

View File

@@ -1,12 +1,12 @@
import customtkinter as ctk
from typing import Dict, List
import json
from datetime import datetime
import time
import threading
import os
import queue
import sys
import os
import threading
import time
from datetime import datetime
from typing import Dict, List
import customtkinter as ctk
from dotenv import load_dotenv
# 获取当前文件的目录
@@ -25,9 +25,11 @@ else:
print("未找到环境配置文件")
sys.exit(1)
from pymongo import MongoClient
from typing import Optional
from pymongo import MongoClient
class Database:
_instance: Optional["Database"] = None

View File

@@ -1,6 +1,5 @@
from typing import Dict, List, Union, Optional, Any
import base64
import os
from typing import Any, Dict, List, Union
"""
OneBot v11 Message Segment Builder

View File

@@ -1,21 +1,23 @@
from loguru import logger
from nonebot import on_message, on_command, require, get_driver
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent, Message, MessageSegment
from nonebot.typing import T_State
from ...common.database import Database
from .config import global_config
import os
import asyncio
import os
import random
from .relationship_manager import relationship_manager
from ..schedule.schedule_generator import bot_schedule
from .willing_manager import willing_manager
from nonebot.rule import to_me
from .bot import chat_bot
from .emoji_manager import emoji_manager
from ..moods.moods import MoodManager # 导入情绪管理器
import time
from loguru import logger
from nonebot import get_driver, on_command, on_message, require
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent, Message, MessageSegment
from nonebot.rule import to_me
from nonebot.typing import T_State
from ...common.database import Database
from ..moods.moods import MoodManager # 导入情绪管理器
from ..schedule.schedule_generator import bot_schedule
from ..utils.statistic import LLMStatistics
from .bot import chat_bot
from .config import global_config
from .emoji_manager import emoji_manager
from .relationship_manager import relationship_manager
from .willing_manager import willing_manager
# 创建LLM统计实例
llm_stats = LLMStatistics("llm_statistics.txt")
@@ -39,12 +41,11 @@ print("\033[1;32m[初始化数据库完成]\033[0m")
# 导入其他模块
from ..memory_system.memory import hippocampus, memory_graph
from .bot import ChatBot
from .emoji_manager import emoji_manager
# from .message_send_control import message_sender
from .relationship_manager import relationship_manager
from .message_sender import message_manager,message_sender
from ..memory_system.memory import memory_graph,hippocampus
from .message_sender import message_manager, message_sender
# 初始化表情管理器
emoji_manager.initialize()

View File

@@ -1,23 +1,27 @@
from nonebot.adapters.onebot.v11 import GroupMessageEvent, Message as EventMessage, Bot
from .message import Message, MessageSet, Message_Sending
from .config import BotConfig, global_config
from .storage import MessageStorage
from .llm_generator import ResponseGenerator
# from .message_stream import MessageStream, MessageStreamContainer
from .topic_identifier import topic_identifier
from random import random, choice
from .emoji_manager import emoji_manager # 导入表情包管理器
from ..moods.moods import MoodManager # 导入情绪管理器
import time
import os
from .cq_code import CQCode # 导入CQCode模块
from .message_sender import message_manager # 导入新的消息管理器
from .message import Message_Thinking # 导入 Message_Thinking 类
from .relationship_manager import relationship_manager
from .willing_manager import willing_manager # 导入意愿管理器
from .utils import is_mentioned_bot_in_txt, calculate_typing_time
from ..memory_system.memory import memory_graph,hippocampus
from random import random
from loguru import logger
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent
from ..memory_system.memory import hippocampus
from ..moods.moods import MoodManager # 导入情绪管理器
from .config import global_config
from .cq_code import CQCode # 导入CQCode模块
from .emoji_manager import emoji_manager # 导入表情包管理器
from .llm_generator import ResponseGenerator
from .message import (
Message,
Message_Sending,
Message_Thinking, # 导入 Message_Thinking 类
MessageSet,
)
from .message_sender import message_manager # 导入新的消息管理器
from .relationship_manager import relationship_manager
from .storage import MessageStorage
from .utils import calculate_typing_time, is_mentioned_bot_in_txt
from .willing_manager import willing_manager # 导入意愿管理器
class ChatBot:
def __init__(self):
@@ -123,6 +127,11 @@ class ChatBot:
container.messages.remove(msg)
# print(f"\033[1;32m[思考消息删除]\033[0m 已找到思考消息对象,开始删除")
break
# 如果找不到思考消息,直接返回
if not thinking_message:
print(f"\033[1;33m[警告]\033[0m 未找到对应的思考消息,可能已超时被移除")
return
#记录开始思考的时间,避免从思考到回复的时间太久
thinking_start_time = thinking_message.thinking_start_time

View File

@@ -1,16 +1,13 @@
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, Set
import os
import configparser
from dataclasses import dataclass, field
from typing import Dict, Optional
import tomli
import sys
from loguru import logger
from nonebot import get_driver
from packaging import version
from packaging.version import Version, InvalidVersion
from packaging.specifiers import SpecifierSet,InvalidSpecifier
@dataclass
class BotConfig:
"""机器人配置类"""
@@ -28,6 +25,12 @@ class BotConfig:
talk_allowed_groups = set()
talk_frequency_down_groups = set()
thinking_timeout: int = 100 # 思考时间
response_willing_amplifier: float = 1.0 # 回复意愿放大系数
response_interested_rate_amplifier: float = 1.0 # 回复兴趣度放大系数
down_frequency_rate: float = 3.5 # 降低回复频率的群组回复意愿降低系数
ban_user_id = set()
build_memory_interval: int = 30 # 记忆构建间隔(秒)
@@ -65,13 +68,20 @@ class BotConfig:
mood_decay_rate: float = 0.95 # 情绪衰减率
mood_intensity_factor: float = 0.7 # 情绪强度因子
keywords_reaction_rules = [] # 关键词回复规则
# 默认人设
PROMPT_PERSONALITY=[
"曾经是一个学习地质的女大学生,现在学习心理学和脑科学,你会刷贴吧",
"是一个女大学生,你有黑色头发,你会刷小红书"
"是一个女大学生,你有黑色头发,你会刷小红书",
"是一个女大学生你会刷b站对ACG文化感兴趣"
]
PROMPT_SCHEDULE_GEN="一个曾经学习地质,现在学习心理学和脑科学的女大学生喜欢刷qq贴吧知乎和小红书"
PERSONALITY_1: float = 0.6 # 第一种人格概率
PERSONALITY_2: float = 0.3 # 第二种人格概率
PERSONALITY_3: float = 0.1 # 第三种人格概率
@staticmethod
def get_config_dir() -> str:
"""获取配置文件目录"""
@@ -139,13 +149,18 @@ class BotConfig:
config = cls()
def personality(parent: dict):
personality_config = parent['personality']
personality = personality_config.get('prompt_personality')
personality_config=parent['personality']
personality=personality_config.get('prompt_personality')
if len(personality) >= 2:
logger.info(f"载入自定义人格:{personality}")
config.PROMPT_PERSONALITY=personality_config.get('prompt_personality',config.PROMPT_PERSONALITY)
logger.info(f"载入自定义日程prompt:{personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)}")
config.PROMPT_SCHEDULE_GEN=personality_config.get('prompt_schedule',config.PROMPT_SCHEDULE_GEN)
if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
config.PERSONALITY_1=personality_config.get('personality_1_probability',config.PERSONALITY_1)
config.PERSONALITY_2=personality_config.get('personality_2_probability',config.PERSONALITY_2)
config.PERSONALITY_3=personality_config.get('personality_3_probability',config.PERSONALITY_3)
def emoji(parent: dict):
emoji_config = parent["emoji"]
@@ -246,6 +261,12 @@ class BotConfig:
config.emoji_chance = msg_config.get("emoji_chance", config.emoji_chance)
config.ban_words=msg_config.get("ban_words",config.ban_words)
if config.INNER_VERSION in SpecifierSet(">=0.0.2"):
config.thinking_timeout = msg_config.get("thinking_timeout", config.thinking_timeout)
config.response_willing_amplifier = msg_config.get("response_willing_amplifier", config.response_willing_amplifier)
config.response_interested_rate_amplifier = msg_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
config.down_frequency_rate = msg_config.get("down_frequency_rate", config.down_frequency_rate)
def memory(parent: dict):
memory_config = parent["memory"]
config.build_memory_interval = memory_config.get("build_memory_interval", config.build_memory_interval)
@@ -257,6 +278,11 @@ class BotConfig:
config.mood_decay_rate = mood_config.get("mood_decay_rate", config.mood_decay_rate)
config.mood_intensity_factor = mood_config.get("mood_intensity_factor", config.mood_intensity_factor)
def keywords_reaction(parent: dict):
keywords_reaction_config = parent["keywords_reaction"]
if keywords_reaction_config.get("enable", False):
config.keywords_reaction_rules = keywords_reaction_config.get("rules", config.keywords_reaction_rules)
def groups(parent: dict):
groups_config = parent["groups"]
config.talk_allowed_groups = set(groups_config.get("talk_allowed", []))
@@ -269,7 +295,7 @@ class BotConfig:
config.enable_kuuki_read = others_config.get("enable_kuuki_read", config.enable_kuuki_read)
# 版本表达式:>=1.0.0,<2.0.0
# 允许字段func: method, support: str, notice: str
# 允许字段func: method, support: str, notice: str, necessary: bool
# 如果使用 notice 字段,在该组配置加载时,会展示该字段对用户的警示
# 例如:"notice": "personality 将在 1.3.2 后被移除",那么在有效版本中的用户就会虽然可以
# 正常执行程序,但是会看到这条自定义提示
@@ -310,6 +336,11 @@ class BotConfig:
"func": mood,
"support": ">=0.0.0"
},
"keywords_reaction": {
"func": keywords_reaction,
"support": ">=0.0.2",
"necessary": False
},
"groups": {
"func": groups,
"support": ">=0.0.0"
@@ -327,7 +358,11 @@ class BotConfig:
if os.path.exists(config_path):
with open(config_path, "rb") as f:
toml_dict = tomli.load(f)
try:
toml_dict = tomli.load(f)
except(tomli.TOMLDecodeError) as e:
logger.critical(f"配置文件bot_config.toml填写有误请检查第{e.lineno}行第{e.colno}处:{e.msg}")
exit(1)
# 获取配置文件版本
config.INNER_VERSION = cls.get_config_version(toml_dict)
@@ -339,7 +374,7 @@ class BotConfig:
# 检查配置文件版本是否在支持范围内
if config.INNER_VERSION in group_specifierset:
# 如果版本在支持范围内,检查是否在支持的末端
# 如果版本在支持范围内,检查是否存在通知
if 'notice' in include_configs[key]:
logger.warning(include_configs[key]["notice"])
@@ -352,7 +387,13 @@ class BotConfig:
f"当前程序仅支持以下版本范围: {group_specifierset}"
)
raise InvalidVersion(f"当前程序仅支持以下版本范围: {group_specifierset}")
# 如果 necessary 项目存在,而且显式声明是 False进入特殊处理
elif "necessary" in include_configs[key] and include_configs[key].get("necessary") == False:
# 通过 pass 处理的项虽然直接忽略也是可以的,但是为了不增加理解困难,依然需要在这里显式处理
if key == "keywords_reaction":
pass
else:
# 如果用户根本没有需要的配置项,提示缺少配置
logger.error(f"配置文件中缺少必需的字段: '{key}'")

View File

@@ -1,24 +1,23 @@
from dataclasses import dataclass
from typing import Dict, Optional, List, Union
import html
import requests
import base64
from PIL import Image
import html
import os
from random import random
from nonebot.adapters.onebot.v11 import Bot
from .config import global_config
import time
import asyncio
from .utils_image import storage_image, storage_emoji
from .utils_user import get_user_nickname
from ..models.utils_model import LLM_request
from .mapper import emojimapper
from dataclasses import dataclass
from typing import Dict, Optional
import requests
# 解析各种CQ码
# 包含CQ码类
import urllib3
from urllib3.util import create_urllib3_context
from nonebot import get_driver
from urllib3.util import create_urllib3_context
from ..models.utils_model import LLM_request
from .config import global_config
from .mapper import emojimapper
from .utils_image import storage_emoji, storage_image
from .utils_user import get_user_nickname
driver = get_driver()
config = driver.config
@@ -81,7 +80,7 @@ class CQCode:
if user_nickname:
self.translated_plain_text = f"[@{user_nickname}]"
else:
self.translated_plain_text = f"@某人"
self.translated_plain_text = "@某人"
elif self.type == 'reply':
self.translated_plain_text = await self.translate_reply()
elif self.type == 'face':
@@ -308,7 +307,7 @@ class CQCode:
return f"[回复 {self.reply_message.sender.nickname} 的消息: {message_obj.processed_plain_text}]"
else:
print(f"\033[1;31m[错误]\033[0m 回复消息的sender.user_id为空")
print("\033[1;31m[错误]\033[0m 回复消息的sender.user_id为空")
return '[回复某人消息]'
@staticmethod

View File

@@ -1,27 +1,18 @@
from typing import List, Dict, Optional
import random
from ...common.database import Database
import os
import json
from dataclasses import dataclass
import jieba.analyse as jieba_analyse
import aiohttp
import hashlib
from datetime import datetime
import base64
import shutil
import asyncio
import os
import random
import time
from PIL import Image
import io
from loguru import logger
import traceback
from typing import Optional
from loguru import logger
from nonebot import get_driver
from ...common.database import Database
from ..chat.config import global_config
from ..models.utils_model import LLM_request
from ..chat.utils_image import image_path_to_base64
from ..chat.utils import get_embedding
from ..chat.utils_image import image_path_to_base64
from ..models.utils_model import LLM_request
driver = get_driver()
config = driver.config
@@ -275,7 +266,7 @@ class EmojiManager:
async def _periodic_scan(self, interval_MINS: int = 10):
"""定期扫描新表情包"""
while True:
print(f"\033[1;36m[表情包]\033[0m 开始扫描新表情包...")
print("\033[1;36m[表情包]\033[0m 开始扫描新表情包...")
await self.scan_new_emojis()
await asyncio.sleep(interval_MINS * 60) # 每600秒扫描一次

View File

@@ -1,19 +1,16 @@
from typing import Dict, Any, List, Optional, Union, Tuple
from openai import OpenAI
import asyncio
from functools import partial
from .message import Message
from .config import global_config
from ...common.database import Database
import random
import time
import numpy as np
from .relationship_manager import relationship_manager
from .prompt_builder import prompt_builder
from .config import global_config
from .utils import process_llm_response
from typing import List, Optional, Tuple, Union
from nonebot import get_driver
from ...common.database import Database
from ..models.utils_model import LLM_request
from .config import global_config
from .message import Message
from .prompt_builder import prompt_builder
from .relationship_manager import relationship_manager
from .utils import process_llm_response
driver = get_driver()
config = driver.config

View File

@@ -1,16 +1,12 @@
from dataclasses import dataclass
from typing import List, Optional, Dict, Tuple, ForwardRef
import time
import jieba.analyse as jieba_analyse
import os
from datetime import datetime
from ...common.database import Database
from PIL import Image
from .config import global_config
from dataclasses import dataclass
from typing import Dict, ForwardRef, List, Optional
import urllib3
from .utils_user import get_user_nickname,get_user_cardname,get_groupname
from .cq_code import CQCode, cq_code_tool
from .utils_cq import parse_cq_code
from .cq_code import cq_code_tool,CQCode
from .utils_user import get_groupname, get_user_cardname, get_user_nickname
Message = ForwardRef('Message') # 添加这行
# 禁用SSL警告

View File

@@ -1,14 +1,15 @@
from typing import Union, List, Optional, Dict
from collections import deque
from .message import Message, Message_Thinking, MessageSet, Message_Sending
import time
import asyncio
import time
from typing import Dict, List, Optional, Union
from nonebot.adapters.onebot.v11 import Bot
from .config import global_config
from .storage import MessageStorage
from .cq_code import cq_code_tool
import random
from .message import Message, Message_Sending, Message_Thinking, MessageSet
from .storage import MessageStorage
from .utils import calculate_typing_time
from .config import global_config
class Message_Sender:
"""发送器"""
@@ -161,8 +162,12 @@ class MessageManager:
#优先等待这条消息
message_earliest.update_thinking_time()
thinking_time = message_earliest.thinking_time
if thinking_time % 10 == 0:
print(f"\033[1;34m[调试]\033[0m 消息正在思考中,已思考{int(thinking_time)}")
print(f"\033[1;34m[调试]\033[0m 消息正在思考中,已思考{int(thinking_time)}\033[K\r", end='', flush=True)
# 检查是否超时
if thinking_time > global_config.thinking_timeout:
print(f"\033[1;33m[警告]\033[0m 消息思考超时({thinking_time}秒),移除该消息")
container.remove_message(message_earliest)
else:# 如果不是message_thinking就只能是message_sending
print(f"\033[1;34m[调试]\033[0m 消息'{message_earliest.processed_plain_text}'正在发送中")
#直接发,等什么呢
@@ -200,7 +205,7 @@ class MessageManager:
# 安全地移除消息
if not container.remove_message(msg):
print(f"\033[1;33m[警告]\033[0m 尝试删除不存在的消息")
print("\033[1;33m[警告]\033[0m 尝试删除不存在的消息")
except Exception as e:
print(f"\033[1;31m[错误]\033[0m 处理超时消息时发生错误: {e}")
continue

View File

@@ -1,17 +1,14 @@
import time
import random
from ..schedule.schedule_generator import bot_schedule
import os
from .utils import get_embedding, combine_messages, get_recent_group_detailed_plain_text
import time
from typing import Optional
from ...common.database import Database
from .config import global_config
from .topic_identifier import topic_identifier
from ..memory_system.memory import memory_graph,hippocampus
from random import choice
import numpy as np
import jieba
from collections import Counter
from ..memory_system.memory import hippocampus, memory_graph
from ..moods.moods import MoodManager
from ..schedule.schedule_generator import bot_schedule
from .config import global_config
from .utils import get_embedding, get_recent_group_detailed_plain_text
class PromptBuilder:
def __init__(self):
@@ -25,7 +22,7 @@ class PromptBuilder:
message_txt: str,
sender_name: str = "某人",
relationship_value: float = 0.0,
group_id: int = None) -> str:
group_id: Optional[int] = None) -> tuple[str, str]:
"""构建prompt
Args:
@@ -101,7 +98,7 @@ class PromptBuilder:
for memory in relevant_memories:
memory_items.append(f"关于「{memory['topic']}」的记忆:{memory['content']}")
memory_prompt = f"看到这些聊天,你想起来:\n" + "\n".join(memory_items) + "\n"
memory_prompt = "看到这些聊天,你想起来:\n" + "\n".join(memory_items) + "\n"
# 打印调试信息
print("\n\033[1;32m[记忆检索]\033[0m 找到以下相关记忆:")
@@ -117,25 +114,41 @@ class PromptBuilder:
activate_prompt = ''
activate_prompt = f"以上是群里正在进行的聊天,{memory_prompt} 现在昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},{mood_prompt},你想要{relation_prompt_2}"
#检测机器人相关词汇
bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
if is_bot:
is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
else:
is_bot_prompt = ''
#检测机器人相关词汇,改为关键词检测与反应功能了,提取到全局配置中
# bot_keywords = ['人机', 'bot', '机器', '入机', 'robot', '机器人']
# is_bot = any(keyword in message_txt.lower() for keyword in bot_keywords)
# if is_bot:
# is_bot_prompt = '有人说你是人机或者机器人,否定这一事实,攻击或反问他。注意,只有认为你是机器人才回复,否则不要否认'
# else:
# is_bot_prompt = ''
# 关键词检测与反应
keywords_reaction_prompt = ''
for rule in global_config.keywords_reaction_rules:
if rule.get("enable", False):
if any(keyword in message_txt.lower() for keyword in rule.get("keywords", [])):
print(f"检测到以下关键词之一:{rule.get('keywords', [])},触发反应:{rule.get('reaction', '')}")
keywords_reaction_prompt += rule.get("reaction", "") + ''
#人格选择
personality=global_config.PROMPT_PERSONALITY
probability_1 = global_config.PERSONALITY_1
probability_2 = global_config.PERSONALITY_2
probability_3 = global_config.PERSONALITY_3
prompt_personality = ''
personality_choice = random.random()
if personality_choice < 4/6: # 第一种人格
if personality_choice < probability_1: # 第一种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[0]}, 你正在浏览qq群,{promt_info_prompt},
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{is_bot_prompt}
现在请你给出日常且口语化的回复,平淡一些,尽量简短一些。{keywords_reaction_prompt}
请注意把握群里的聊天内容,不要刻意突出自身学科背景,不要回复的太有条理,可以有个性。'''
elif personality_choice < 1: # 第二种人格
elif personality_choice < probability_1 + probability_2: # 第二种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[1]}, 你正在浏览qq群{promt_info_prompt},
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{is_bot_prompt}
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
请你表达自己的见解和观点。可以有个性。'''
else: # 第三种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[2]}, 你正在浏览qq群{promt_info_prompt},
现在请你给出日常且口语化的回复,请表现你自己的见解,不要一昧迎合,尽量简短一些。{keywords_reaction_prompt}
请你表达自己的见解和观点。可以有个性。'''
#中文高手(新加的好玩功能)
@@ -163,10 +176,12 @@ class PromptBuilder:
activate_prompt_check=f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。"
prompt_personality_check = ''
extra_check_info=f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息一定要回复如果自己正在和别人聊天一定要回复其他话题如果合适搭话也可以回复如果认为应该回复请输出yes否则输出no请注意是决定是否需要回复而不是编写回复内容除了yes和no不要输出任何回复内容。"
if personality_choice < 4/6: # 第一种人格
if personality_choice < probability_1: # 第一种人格
prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME}{personality[0]}, 你正在浏览qq群{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
elif personality_choice < 1: # 第二种人格
elif personality_choice < probability_1 + probability_2: # 第二种人格
prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME}{personality[1]}, 你正在浏览qq群{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
else: # 第三种人格
prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME}{personality[2]}, 你正在浏览qq群{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
prompt_check_if_response=f"{prompt_info}\n{prompt_date}\n{chat_talking_prompt}\n{prompt_personality_check}"
@@ -194,14 +209,16 @@ class PromptBuilder:
#激活prompt构建
activate_prompt = ''
activate_prompt = f"以上是群里正在进行的聊天。"
activate_prompt = "以上是群里正在进行的聊天。"
personality=global_config.PROMPT_PERSONALITY
prompt_personality = ''
personality_choice = random.random()
if personality_choice < 4/6: # 第一种人格
if personality_choice < probability_1: # 第一种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[0]}'''
elif personality_choice < 1: # 第二种人格
elif personality_choice < probability_1 + probability_2: # 第二种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[1]}'''
else: # 第三种人格
prompt_personality = f'''{activate_prompt}你的网名叫{global_config.BOT_NICKNAME}{personality[2]}'''
topics_str=','.join(f"\"{topics}\"")
prompt_for_select=f"你现在想在群里发言,回忆了一下,想到几个话题,分别是{topics_str},综合当前状态以及群内气氛,请你在其中选择一个合适的话题,注意只需要输出话题,除了话题什么也不要输出(双引号也不要输出)"

View File

@@ -1,8 +1,8 @@
import time
from ...common.database import Database
from nonebot.adapters.onebot.v11 import Bot
from typing import Optional, Tuple
import asyncio
from typing import Optional
from ...common.database import Database
class Impression:
traits: str = None
@@ -123,7 +123,7 @@ class RelationshipManager:
print(f"\033[1;32m[关系管理]\033[0m 已加载 {len(self.relationships)} 条关系记录")
while True:
print(f"\033[1;32m[关系管理]\033[0m 正在自动保存关系")
print("\033[1;32m[关系管理]\033[0m 正在自动保存关系")
await asyncio.sleep(300) # 等待300秒(5分钟)
await self._save_all_relationships()

View File

@@ -1,10 +1,8 @@
from typing import Dict, List, Any, Optional
import time
import threading
from collections import defaultdict
import asyncio
from .message import Message
from typing import Optional
from ...common.database import Database
from .message import Message
class MessageStorage:
def __init__(self):

View File

@@ -1,10 +1,9 @@
from typing import Optional, Dict, List
from openai import OpenAI
from .message import Message
import jieba
from typing import List, Optional
from nonebot import get_driver
from .config import global_config
from ..models.utils_model import LLM_request
from .config import global_config
driver = get_driver()
config = driver.config
@@ -26,7 +25,7 @@ class TopicIdentifier:
topic, _ = await self.llm_topic_judge.generate_response(prompt)
if not topic:
print(f"\033[1;31m[错误]\033[0m LLM API 返回为空")
print("\033[1;31m[错误]\033[0m LLM API 返回为空")
return None
# 直接在这里处理主题解析

View File

@@ -1,19 +1,18 @@
import time
import random
from typing import List
from .message import Message
import requests
import numpy as np
from .config import global_config
import re
from typing import Dict
from collections import Counter
import math
from nonebot import get_driver
from ..models.utils_model import LLM_request
import aiohttp
import random
import time
from collections import Counter
from typing import Dict, List
import jieba
import numpy as np
from nonebot import get_driver
from ..models.utils_model import LLM_request
from ..utils.typo_generator import ChineseTypoGenerator
from .config import global_config
from .message import Message
from ..moods.moods import MoodManager
driver = get_driver()
config = driver.config
@@ -328,7 +327,7 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
# processed_response = process_text_with_typos(content)
if len(text) > 300:
if len(text) > 200:
print(f"回复过长 ({len(text)} 字符),返回默认回复")
return ['懒得说']
# 处理长消息
@@ -338,30 +337,55 @@ def process_llm_response(text: str) -> List[str]:
tone_error_rate=0.2,
word_replace_rate=0.02
)
typoed_text = typo_generator.create_typo_sentence(text)[0]
sentences = split_into_sentences_w_remove_punctuation(typoed_text)
split_sentences = split_into_sentences_w_remove_punctuation(text)
sentences = []
for sentence in split_sentences:
typoed_text, typo_corrections = typo_generator.create_typo_sentence(sentence)
sentences.append(typoed_text)
if typo_corrections:
sentences.append(typo_corrections)
# 检查分割后的消息数量是否过多超过3条
if len(sentences) > 4:
if len(sentences) > 5:
print(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f'{global_config.BOT_NICKNAME}不知道哦']
return sentences
def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1) -> float:
def calculate_typing_time(input_string: str, chinese_time: float = 0.4, english_time: float = 0.2) -> float:
"""
计算输入字符串所需的时间,中文和英文字符有不同的输入时间
input_string (str): 输入的字符串
chinese_time (float): 中文字符的输入时间默认为0.3
english_time (float): 英文字符的输入时间默认为0.15
chinese_time (float): 中文字符的输入时间默认为0.2
english_time (float): 英文字符的输入时间默认为0.1秒
特殊情况:
- 如果只有一个中文字符将使用3倍的中文输入时间
- 在所有输入结束后额外加上回车时间0.3秒
"""
mood_manager = MoodManager.get_instance()
# 将0-1的唤醒度映射到-1到1
mood_arousal = mood_manager.current_mood.arousal
# 映射到0.5到2倍的速度系数
typing_speed_multiplier = 1.5 ** mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
chinese_time *= 1/typing_speed_multiplier
english_time *= 1/typing_speed_multiplier
# 计算中文字符数
chinese_chars = sum(1 for char in input_string if '\u4e00' <= char <= '\u9fff')
# 如果只有一个中文字符使用3倍时间
if chinese_chars == 1 and len(input_string.strip()) == 1:
return chinese_time * 3 + 0.3 # 加上回车时间
# 正常计算所有字符的输入时间
total_time = 0.0
for char in input_string:
if '\u4e00' <= char <= '\u9fff': # 判断是否为中文字符
total_time += chinese_time
else: # 其他字符(如英文)
total_time += english_time
return total_time
return total_time + 0.3 # 加上回车时间
def cosine_similarity(v1, v2):

View File

@@ -1,14 +1,15 @@
import base64
import io
from PIL import Image
import hashlib
import time
import os
import time
import zlib # 用于 CRC32
from loguru import logger
from nonebot import get_driver
from PIL import Image
from ...common.database import Database
from ..chat.config import global_config
import zlib # 用于 CRC32
import base64
from nonebot import get_driver
from loguru import logger
driver = get_driver()
config = driver.config
@@ -119,7 +120,7 @@ def storage_compress_image(base64_data: str, max_size: int = 200) -> str:
# 保存记录
collection.insert_one(image_record)
print(f"\033[1;32m[成功]\033[0m 保存图片记录到数据库")
print("\033[1;32m[成功]\033[0m 保存图片记录到数据库")
except Exception as db_error:
print(f"\033[1;31m[错误]\033[0m 数据库操作失败: {str(db_error)}")

View File

@@ -1,5 +1,6 @@
from .relationship_manager import relationship_manager
from .config import global_config
from .relationship_manager import relationship_manager
def get_user_nickname(user_id: int) -> str:
if int(user_id) == int(global_config.BOT_QQ):

View File

@@ -1,4 +1,6 @@
import asyncio
from .config import global_config
class WillingManager:
def __init__(self):
@@ -37,11 +39,14 @@ class WillingManager:
current_willing *= 0.1
print(f"表情包, 当前意愿: {current_willing}")
print(f"放大系数_interested_rate: {global_config.response_interested_rate_amplifier}")
interested_rate *= global_config.response_interested_rate_amplifier #放大回复兴趣度
if interested_rate > 0.4:
print(f"兴趣度: {interested_rate}, 当前意愿: {current_willing}")
current_willing += interested_rate-0.1
# print(f"兴趣度: {interested_rate}, 当前意愿: {current_willing}")
current_willing += interested_rate-0.4
self.group_reply_willing[group_id] = min(current_willing, 3.0)
current_willing *= global_config.response_willing_amplifier #放大回复意愿
# print(f"放大系数_willing: {global_config.response_willing_amplifier}, 当前意愿: {current_willing}")
reply_probability = max((current_willing - 0.45) * 2, 0)
if group_id not in config.talk_allowed_groups:
@@ -49,11 +54,14 @@ class WillingManager:
reply_probability = 0
if group_id in config.talk_frequency_down_groups:
reply_probability = reply_probability / 3.5
reply_probability = reply_probability / global_config.down_frequency_rate
reply_probability = min(reply_probability, 1)
if reply_probability < 0:
reply_probability = 0
self.group_reply_willing[group_id] = min(current_willing, 3.0)
return reply_probability
def change_reply_willing_sent(self, group_id: int):

View File

@@ -1,8 +1,8 @@
import os
import sys
import numpy as np
import requests
import time
import requests
from dotenv import load_dotenv
# 添加项目根目录到 Python 路径

View File

@@ -1,19 +1,12 @@
# -*- coding: utf-8 -*-
import os
import sys
import jieba
import networkx as nx
import matplotlib.pyplot as plt
import math
from collections import Counter
import datetime
import random
import time
import jieba
import matplotlib.pyplot as plt
import networkx as nx
from dotenv import load_dotenv
import sys
import asyncio
import aiohttp
from typing import Tuple
sys.path.append("C:/GitHub/MaiMBot") # 添加项目根目录到 Python 路径
from src.common.database import Database # 使用正确的导入语法

View File

@@ -1,20 +1,21 @@
# -*- coding: utf-8 -*-
import os
import jieba
import networkx as nx
import matplotlib.pyplot as plt
from collections import Counter
import datetime
import math
import random
import time
import jieba
import networkx as nx
from ...common.database import Database # 使用正确的导入语法
from ..chat.config import global_config
from ...common.database import Database # 使用正确的导入语法
from ..chat.utils import (
calculate_information_content,
cosine_similarity,
get_cloest_chat_from_db,
text_to_vector,
)
from ..models.utils_model import LLM_request
import math
from ..chat.utils import calculate_information_content, get_cloest_chat_from_db ,text_to_vector,cosine_similarity
class Memory_graph:
@@ -530,7 +531,8 @@ class Hippocampus:
# 计算每个识别出的主题与记忆主题的相似度
for topic in topics:
if debug_info:
print(f"\033[1;32m[{debug_info}]\033[0m 正在思考有没有见过: {topic}")
# print(f"\033[1;32m[{debug_info}]\033[0m 正在思考有没有见过: {topic}")
pass
topic_vector = text_to_vector(topic)
has_similar_topic = False
@@ -548,11 +550,13 @@ class Hippocampus:
if similarity >= similarity_threshold:
has_similar_topic = True
if debug_info:
print(f"\033[1;32m[{debug_info}]\033[0m 找到相似主题: {topic} -> {memory_topic} (相似度: {similarity:.2f})")
# print(f"\033[1;32m[{debug_info}]\033[0m 找到相似主题: {topic} -> {memory_topic} (相似度: {similarity:.2f})")
pass
all_similar_topics.append((memory_topic, similarity))
if not has_similar_topic and debug_info:
print(f"\033[1;31m[{debug_info}]\033[0m 没有见过: {topic} ,呃呃")
# print(f"\033[1;31m[{debug_info}]\033[0m 没有见过: {topic} ,呃呃")
pass
return all_similar_topics
@@ -696,6 +700,7 @@ def segment_text(text):
from nonebot import get_driver
driver = get_driver()
config = driver.config

View File

@@ -1,21 +1,22 @@
# -*- coding: utf-8 -*-
import sys
import jieba
import networkx as nx
import matplotlib.pyplot as plt
import math
from collections import Counter
import datetime
import random
import time
import math
import os
from dotenv import load_dotenv
import pymongo
from loguru import logger
import random
import sys
import time
from collections import Counter
from pathlib import Path
import matplotlib.pyplot as plt
import networkx as nx
import pymongo
from dotenv import load_dotenv
from loguru import logger
# from chat.config import global_config
sys.path.append("C:/GitHub/MaiMBot") # 添加项目根目录到 Python 路径
from src.common.database import Database
from src.common.database import Database
from src.plugins.memory_system.offline_llm import LLMModel
# 获取当前文件的目录
@@ -102,7 +103,7 @@ def get_cloest_chat_from_db(db, length: int, timestamp: str):
# 检查当前记录的memorized值
current_memorized = record.get('memorized', 0)
if current_memorized > 3:
print(f"消息已读取3次跳过")
print("消息已读取3次跳过")
return ''
# 更新memorized值
@@ -114,7 +115,7 @@ def get_cloest_chat_from_db(db, length: int, timestamp: str):
chat_text += record["detailed_plain_text"]
return chat_text
print(f"消息已读取3次跳过")
print("消息已读取3次跳过")
return ''
class Memory_graph:

View File

@@ -1,11 +1,13 @@
import os
import requests
from typing import Tuple, Union
import time
import aiohttp
import asyncio
import os
import time
from typing import Tuple, Union
import aiohttp
import requests
from loguru import logger
class LLMModel:
def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
self.model_name = model_name

View File

@@ -1,16 +1,16 @@
import aiohttp
import asyncio
import json
import requests
import time
import re
from datetime import datetime
from typing import Tuple, Union
from nonebot import get_driver
import aiohttp
from loguru import logger
from nonebot import get_driver
from ...common.database import Database
from ..chat.config import global_config
from ..chat.utils_image import compress_base64_image_by_scale
from datetime import datetime
from ...common.database import Database
driver = get_driver()
config = driver.config
@@ -182,6 +182,13 @@ class LLM_request:
continue
elif response.status in policy["abort_codes"]:
logger.error(f"错误码: {response.status} - {error_code_mapping.get(response.status)}")
if response.status == 403 :
if global_config.llm_normal == "Pro/deepseek-ai/DeepSeek-V3":
logger.error("可能是没有给硅基流动充钱普通模型自动退化至非Pro模型反应速度可能会变慢")
global_config.llm_normal = "deepseek-ai/DeepSeek-V3"
if global_config.llm_reasoning == "Pro/deepseek-ai/DeepSeek-R1":
logger.error("可能是没有给硅基流动充钱推理模型自动退化至非Pro模型反应速度可能会变慢")
global_config.llm_reasoning = "deepseek-ai/DeepSeek-R1"
raise RuntimeError(f"请求被拒绝: {error_code_mapping.get(response.status)}")
response.raise_for_status()
@@ -227,7 +234,7 @@ class LLM_request:
await asyncio.sleep(wait_time)
else:
logger.critical(f"请求失败: {str(e)}")
logger.critical(f"请求头: {await self._build_headers()} 请求体: {payload}")
logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {payload}")
raise RuntimeError(f"API请求失败: {str(e)}")
logger.error("达到最大重试次数,请求仍然失败")
@@ -325,12 +332,19 @@ class LLM_request:
reasoning = ""
return content, reasoning
async def _build_headers(self) -> dict:
async def _build_headers(self, no_key: bool = False) -> dict:
"""构建请求头"""
return {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
if no_key:
return {
"Authorization": f"Bearer **********",
"Content-Type": "application/json"
}
else:
return {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
# 防止小朋友们截图自己的key
async def generate_response(self, prompt: str) -> Tuple[str, str]:
"""根据输入的提示生成模型的异步响应"""

View File

@@ -1,10 +1,11 @@
import math
import time
import threading
from typing import Dict, Tuple, Optional
import time
from dataclasses import dataclass
from ..chat.config import global_config
@dataclass
class MoodState:
valence: float # 愉悦度 (-1 到 1)

View File

@@ -1,12 +1,15 @@
import datetime
import os
from typing import List, Dict, Union
from ...common.database import Database # 使用正确的导入语法
from src.plugins.chat.config import global_config
from nonebot import get_driver
from ..models.utils_model import LLM_request
from loguru import logger
import json
from typing import Dict, Union
from loguru import logger
from nonebot import get_driver
from src.plugins.chat.config import global_config
from ...common.database import Database # 使用正确的导入语法
from ..models.utils_model import LLM_request
driver = get_driver()
config = driver.config

View File

@@ -1,11 +1,12 @@
from typing import Dict, List, Any
import time
import threading
import json
from datetime import datetime, timedelta
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Any, Dict
from ...common.database import Database
class LLMStatistics:
def __init__(self, output_file: str = "llm_statistics.txt"):
"""初始化LLM统计类

View File

@@ -2,15 +2,17 @@
错别字生成器 - 基于拼音和字频的中文错别字生成工具
"""
from pypinyin import pinyin, Style
from collections import defaultdict
import json
import os
import jieba
from pathlib import Path
import random
import math
import os
import random
import time
from collections import defaultdict
from pathlib import Path
import jieba
from pypinyin import Style, pinyin
class ChineseTypoGenerator:
def __init__(self,
@@ -282,10 +284,13 @@ class ChineseTypoGenerator:
返回:
typo_sentence: 包含错别字的句子
typo_info: 错别字信息列表
correction_suggestion: 随机选择的一个纠正建议,返回正确的字/词
"""
result = []
typo_info = []
word_typos = [] # 记录词语错误对(错词,正确词)
char_typos = [] # 记录单字错误对(错字,正确字)
current_pos = 0
# 分词
words = self._segment_sentence(sentence)
@@ -294,6 +299,7 @@ class ChineseTypoGenerator:
# 如果是标点符号或空格,直接添加
if all(not self._is_chinese_char(c) for c in word):
result.append(word)
current_pos += len(word)
continue
# 获取词语的拼音
@@ -314,6 +320,8 @@ class ChineseTypoGenerator:
' '.join(word_pinyin),
' '.join(self._get_word_pinyin(typo_word)),
orig_freq, typo_freq))
word_typos.append((typo_word, word)) # 记录(错词,正确词)对
current_pos += len(typo_word)
continue
# 如果不进行整词替换,则进行单字替换
@@ -331,11 +339,15 @@ class ChineseTypoGenerator:
result.append(typo_char)
typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
char_typos.append((typo_char, char)) # 记录(错字,正确字)对
current_pos += 1
continue
result.append(char)
current_pos += 1
else:
# 处理多字词的单字替换
word_result = []
word_start_pos = current_pos
for i, (char, py) in enumerate(zip(word, word_pinyin)):
# 词中的字替换概率降低
word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))
@@ -351,11 +363,24 @@ class ChineseTypoGenerator:
word_result.append(typo_char)
typo_py = pinyin(typo_char, style=Style.TONE3)[0][0]
typo_info.append((char, typo_char, py, typo_py, orig_freq, typo_freq))
char_typos.append((typo_char, char)) # 记录(错字,正确字)对
continue
word_result.append(char)
result.append(''.join(word_result))
current_pos += len(word)
return ''.join(result), typo_info
# 优先从词语错误中选择,如果没有则从单字错误中选择
correction_suggestion = None
# 50%概率返回纠正建议
if random.random() < 0.5:
if word_typos:
wrong_word, correct_word = random.choice(word_typos)
correction_suggestion = correct_word
elif char_typos:
wrong_char, correct_char = random.choice(char_typos)
correction_suggestion = correct_char
return ''.join(result), correction_suggestion
def format_typo_info(self, typo_info):
"""
@@ -417,16 +442,16 @@ def main():
# 创建包含错别字的句子
start_time = time.time()
typo_sentence, typo_info = typo_generator.create_typo_sentence(sentence)
typo_sentence, correction_suggestion = typo_generator.create_typo_sentence(sentence)
# 打印结果
print("\n原句:", sentence)
print("错字版:", typo_sentence)
# 打印错别字信息
if typo_info:
print("\n错别字信息")
print(typo_generator.format_typo_info(typo_info))
# 打印纠正建议
if correction_suggestion:
print("\n随机纠正建议")
print(f"应该改为:{correction_suggestion}")
# 计算并打印总耗时
end_time = time.time()