Merge branch 'dev' of https://github.com/MaiM-with-u/MaiBot into dev
@@ -44,7 +44,7 @@ else:

def update_config():
    # 获取根目录路径
    root_dir = Path(__file__).parent.parent.parent.parent
    root_dir = Path(__file__).parent.parent.parent
    template_dir = root_dir / "template"
    config_dir = root_dir / "config"
    old_config_dir = config_dir / "old"
@@ -305,7 +305,7 @@ class BotConfig:
def get_config_dir() -> str:
    """获取配置文件目录"""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    root_dir = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
    root_dir = os.path.abspath(os.path.join(current_dir, "..", ".."))
    config_dir = os.path.join(root_dir, "config")
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
@@ -1,5 +1,5 @@
from src.do_tool.tool_can_use.base_tool import BaseTool
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.common.logger import get_module_logger
from src.plugins.moods.moods import MoodManager
from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator

@@ -9,7 +9,7 @@ logger = get_module_logger("mid_chat_mem_tool")
class GetMemoryTool(BaseTool):
    """从记忆系统中获取相关记忆的工具"""

    name = "mid_chat_mem"
    name = "get_memory"
    description = "从记忆系统中获取相关记忆"
    parameters = {
        "type": "object",
@@ -49,10 +49,10 @@ class GetMemoryTool(BaseTool):
            else:
                content = f"你不太记得有关{text}的记忆,你对此不太了解"

            return {"name": "mid_chat_mem", "content": content}
            return {"name": "get_memory", "content": content}
        except Exception as e:
            logger.error(f"记忆获取工具执行失败: {str(e)}")
            return {"name": "mid_chat_mem", "content": f"记忆获取失败: {str(e)}"}
            return {"name": "get_memory", "content": f"记忆获取失败: {str(e)}"}


# 注册工具

@@ -1,5 +1,5 @@
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db
import time
@@ -7,6 +7,8 @@ import json
from src.common.logger import get_module_logger, TOOL_USE_STYLE_CONFIG, LogConfig
from src.do_tool.tool_can_use import get_all_tool_definitions, get_tool_instance
from src.heart_flow.sub_heartflow import SubHeartflow
import traceback
from src.plugins.chat.utils import get_recent_group_detailed_plain_text

tool_use_config = LogConfig(
    # 使用消息发送专用样式
@@ -41,6 +43,12 @@ class ToolUser:
        else:
            mid_memory_info = ""

        stream_id = chat_stream.stream_id
        chat_talking_prompt = ""
        if stream_id:
            chat_talking_prompt = get_recent_group_detailed_plain_text(
                stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
            )
        new_messages = list(
            db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
        )
@@ -54,9 +62,10 @@ class ToolUser:
        prompt = ""
        prompt += mid_memory_info
        prompt += "你正在思考如何回复群里的消息。\n"
        prompt += "之前群里进行了如下讨论:\n"
        prompt += chat_talking_prompt
        prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
        prompt += f"注意你就是{bot_name},{bot_name}指的就是你。"

        prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n"
        prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
        return prompt

@@ -188,6 +197,7 @@ class ToolUser:

        except Exception as e:
            logger.error(f"工具调用过程中出错: {str(e)}")
            logger.error(f"工具调用过程中出错: {traceback.format_exc()}")
            return {
                "used_tools": False,
                "error": str(e),

@@ -1,378 +0,0 @@
|
||||
# import customtkinter as ctk
|
||||
# import subprocess
|
||||
# import threading
|
||||
# import queue
|
||||
# import re
|
||||
# import os
|
||||
# import signal
|
||||
# from collections import deque
|
||||
# import sys
|
||||
|
||||
# # 设置应用的外观模式和默认颜色主题
|
||||
# ctk.set_appearance_mode("dark")
|
||||
# ctk.set_default_color_theme("blue")
|
||||
|
||||
|
||||
# class LogViewerApp(ctk.CTk):
|
||||
# """日志查看器应用的主类,继承自customtkinter的CTk类"""
|
||||
|
||||
# def __init__(self):
|
||||
# """初始化日志查看器应用的界面和状态"""
|
||||
# super().__init__()
|
||||
# self.title("日志查看器")
|
||||
# self.geometry("1200x800")
|
||||
|
||||
# # 标记GUI是否运行中
|
||||
# self.is_running = True
|
||||
|
||||
# # 程序关闭时的清理操作
|
||||
# self.protocol("WM_DELETE_WINDOW", self._on_closing)
|
||||
|
||||
# # 初始化进程、日志队列、日志数据等变量
|
||||
# self.process = None
|
||||
# self.log_queue = queue.Queue()
|
||||
# self.log_data = deque(maxlen=10000) # 使用固定长度队列
|
||||
# self.available_levels = set()
|
||||
# self.available_modules = set()
|
||||
# self.sorted_modules = []
|
||||
# self.module_checkboxes = {} # 存储模块复选框的字典
|
||||
|
||||
# # 日志颜色配置
|
||||
# self.color_config = {
|
||||
# "time": "#888888",
|
||||
# "DEBUG": "#2196F3",
|
||||
# "INFO": "#4CAF50",
|
||||
# "WARNING": "#FF9800",
|
||||
# "ERROR": "#F44336",
|
||||
# "module": "#D4D0AB",
|
||||
# "default": "#FFFFFF",
|
||||
# }
|
||||
|
||||
# # 列可见性配置
|
||||
# self.column_visibility = {"show_time": True, "show_level": True, "show_module": True}
|
||||
|
||||
# # 选中的日志等级和模块
|
||||
# self.selected_levels = set()
|
||||
# self.selected_modules = set()
|
||||
|
||||
# # 创建界面组件并启动日志队列处理
|
||||
# self.create_widgets()
|
||||
# self.after(100, self.process_log_queue)
|
||||
|
||||
# def create_widgets(self):
|
||||
# """创建应用界面的各个组件"""
|
||||
# self.grid_columnconfigure(0, weight=1)
|
||||
# self.grid_rowconfigure(1, weight=1)
|
||||
|
||||
# # 控制面板
|
||||
# control_frame = ctk.CTkFrame(self)
|
||||
# control_frame.grid(row=0, column=0, sticky="ew", padx=10, pady=5)
|
||||
|
||||
# self.start_btn = ctk.CTkButton(control_frame, text="启动", command=self.start_process)
|
||||
# self.start_btn.pack(side="left", padx=5)
|
||||
|
||||
# self.stop_btn = ctk.CTkButton(control_frame, text="停止", command=self.stop_process, state="disabled")
|
||||
# self.stop_btn.pack(side="left", padx=5)
|
||||
|
||||
# self.clear_btn = ctk.CTkButton(control_frame, text="清屏", command=self.clear_logs)
|
||||
# self.clear_btn.pack(side="left", padx=5)
|
||||
|
||||
# column_filter_frame = ctk.CTkFrame(control_frame)
|
||||
# column_filter_frame.pack(side="left", padx=20)
|
||||
|
||||
# self.time_check = ctk.CTkCheckBox(column_filter_frame, text="显示时间", command=self.refresh_logs)
|
||||
# self.time_check.pack(side="left", padx=5)
|
||||
# self.time_check.select()
|
||||
|
||||
# self.level_check = ctk.CTkCheckBox(column_filter_frame, text="显示等级", command=self.refresh_logs)
|
||||
# self.level_check.pack(side="left", padx=5)
|
||||
# self.level_check.select()
|
||||
|
||||
# self.module_check = ctk.CTkCheckBox(column_filter_frame, text="显示模块", command=self.refresh_logs)
|
||||
# self.module_check.pack(side="left", padx=5)
|
||||
# self.module_check.select()
|
||||
|
||||
# # 筛选面板
|
||||
# filter_frame = ctk.CTkFrame(self)
|
||||
# filter_frame.grid(row=0, column=1, rowspan=2, sticky="ns", padx=5)
|
||||
|
||||
# ctk.CTkLabel(filter_frame, text="日志等级筛选").pack(pady=5)
|
||||
# self.level_scroll = ctk.CTkScrollableFrame(filter_frame, width=150, height=200)
|
||||
# self.level_scroll.pack(fill="both", expand=True, padx=5)
|
||||
|
||||
# ctk.CTkLabel(filter_frame, text="模块筛选").pack(pady=5)
|
||||
# self.module_filter_entry = ctk.CTkEntry(filter_frame, placeholder_text="输入模块过滤词")
|
||||
# self.module_filter_entry.pack(pady=5)
|
||||
# self.module_filter_entry.bind("<KeyRelease>", self.update_module_filter)
|
||||
|
||||
# self.module_scroll = ctk.CTkScrollableFrame(filter_frame, width=300, height=200)
|
||||
# self.module_scroll.pack(fill="both", expand=True, padx=5)
|
||||
|
||||
# self.log_text = ctk.CTkTextbox(self, wrap="word")
|
||||
# self.log_text.grid(row=1, column=0, sticky="nsew", padx=10, pady=5)
|
||||
|
||||
# self.init_text_tags()
|
||||
|
||||
# def update_module_filter(self, event):
|
||||
# """根据模块过滤词更新模块复选框的显示"""
|
||||
# filter_text = self.module_filter_entry.get().strip().lower()
|
||||
# for module, checkbox in self.module_checkboxes.items():
|
||||
# if filter_text in module.lower():
|
||||
# checkbox.pack(anchor="w", padx=5, pady=2)
|
||||
# else:
|
||||
# checkbox.pack_forget()
|
||||
|
||||
# def update_filters(self, level, module):
|
||||
# """更新日志等级和模块的筛选器"""
|
||||
# if level not in self.available_levels:
|
||||
# self.available_levels.add(level)
|
||||
# self.add_checkbox(self.level_scroll, level, "level")
|
||||
|
||||
# module_key = self.get_module_key(module)
|
||||
# if module_key not in self.available_modules:
|
||||
# self.available_modules.add(module_key)
|
||||
# self.sorted_modules = sorted(self.available_modules, key=lambda x: x.lower())
|
||||
# self.rebuild_module_checkboxes()
|
||||
|
||||
# def rebuild_module_checkboxes(self):
|
||||
# """重新构建模块复选框"""
|
||||
# # 清空现有复选框
|
||||
# for widget in self.module_scroll.winfo_children():
|
||||
# widget.destroy()
|
||||
# self.module_checkboxes.clear()
|
||||
|
||||
# # 重建排序后的复选框
|
||||
# for module in self.sorted_modules:
|
||||
# self.add_checkbox(self.module_scroll, module, "module")
|
||||
|
||||
# def add_checkbox(self, parent, text, type_):
|
||||
# """在指定父组件中添加复选框"""
|
||||
|
||||
# def update_filter():
|
||||
# current = cb.get()
|
||||
# if type_ == "level":
|
||||
# (self.selected_levels.add if current else self.selected_levels.discard)(text)
|
||||
# else:
|
||||
# (self.selected_modules.add if current else self.selected_modules.discard)(text)
|
||||
# self.refresh_logs()
|
||||
|
||||
# cb = ctk.CTkCheckBox(parent, text=text, command=update_filter)
|
||||
# cb.select() # 初始选中
|
||||
|
||||
# # 手动同步初始状态到集合(关键修复)
|
||||
# if type_ == "level":
|
||||
# self.selected_levels.add(text)
|
||||
# else:
|
||||
# self.selected_modules.add(text)
|
||||
|
||||
# if type_ == "module":
|
||||
# self.module_checkboxes[text] = cb
|
||||
# cb.pack(anchor="w", padx=5, pady=2)
|
||||
# return cb
|
||||
|
||||
# def check_filter(self, entry):
|
||||
# """检查日志条目是否符合当前筛选条件"""
|
||||
# level_ok = not self.selected_levels or entry["level"] in self.selected_levels
|
||||
# module_key = self.get_module_key(entry["module"])
|
||||
# module_ok = not self.selected_modules or module_key in self.selected_modules
|
||||
# return level_ok and module_ok
|
||||
|
||||
# def init_text_tags(self):
|
||||
# """初始化日志文本的颜色标签"""
|
||||
# for tag, color in self.color_config.items():
|
||||
# self.log_text.tag_config(tag, foreground=color)
|
||||
# self.log_text.tag_config("default", foreground=self.color_config["default"])
|
||||
|
||||
# def start_process(self):
|
||||
# """启动日志进程并开始读取输出"""
|
||||
# self.process = subprocess.Popen(
|
||||
# ["nb", "run"],
|
||||
# stdout=subprocess.PIPE,
|
||||
# stderr=subprocess.STDOUT,
|
||||
# text=True,
|
||||
# bufsize=1,
|
||||
# encoding="utf-8",
|
||||
# errors="ignore",
|
||||
# )
|
||||
# self.start_btn.configure(state="disabled")
|
||||
# self.stop_btn.configure(state="normal")
|
||||
# threading.Thread(target=self.read_output, daemon=True).start()
|
||||
|
||||
# def stop_process(self):
|
||||
# """停止日志进程并清理相关资源"""
|
||||
# if self.process:
|
||||
# try:
|
||||
# if hasattr(self.process, "pid"):
|
||||
# if os.name == "nt":
|
||||
# subprocess.run(
|
||||
# ["taskkill", "/F", "/T", "/PID", str(self.process.pid)], check=True, capture_output=True
|
||||
# )
|
||||
# else:
|
||||
# os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
|
||||
# except (subprocess.CalledProcessError, ProcessLookupError, OSError) as e:
|
||||
# print(f"终止进程失败: {e}")
|
||||
# finally:
|
||||
# self.process = None
|
||||
# self.log_queue.queue.clear()
|
||||
# self.start_btn.configure(state="normal")
|
||||
# self.stop_btn.configure(state="disabled")
|
||||
# self.refresh_logs()
|
||||
|
||||
# def read_output(self):
|
||||
# """读取日志进程的输出并放入队列"""
|
||||
# try:
|
||||
# while self.process and self.process.poll() is None and self.is_running:
|
||||
# line = self.process.stdout.readline()
|
||||
# if line:
|
||||
# self.log_queue.put(line)
|
||||
# else:
|
||||
# break # 避免空循环
|
||||
# self.process.stdout.close() # 确保关闭文件描述符
|
||||
# except ValueError: # 处理可能的I/O操作异常
|
||||
# pass
|
||||
|
||||
# def process_log_queue(self):
|
||||
# """处理日志队列中的日志条目"""
|
||||
# while not self.log_queue.empty():
|
||||
# line = self.log_queue.get()
|
||||
# self.process_log_line(line)
|
||||
|
||||
# # 仅在GUI仍在运行时继续处理队列
|
||||
# if self.is_running:
|
||||
# self.after(100, self.process_log_queue)
|
||||
|
||||
# def process_log_line(self, line):
|
||||
# """解析单行日志并更新日志数据和筛选器"""
|
||||
# match = re.match(
|
||||
# r"""^
|
||||
# (?:(?P<time>\d{2}:\d{2}(?::\d{2})?)\s*\|\s*)?
|
||||
# (?P<level>\w+)\s*\|\s*
|
||||
# (?P<module>.*?)
|
||||
# \s*[-|]\s*
|
||||
# (?P<message>.*)
|
||||
# $""",
|
||||
# line.strip(),
|
||||
# re.VERBOSE,
|
||||
# )
|
||||
|
||||
# if match:
|
||||
# groups = match.groupdict()
|
||||
# time = groups.get("time", "")
|
||||
# level = groups.get("level", "OTHER")
|
||||
# module = groups.get("module", "UNKNOWN").strip()
|
||||
# message = groups.get("message", "").strip()
|
||||
# raw_line = line
|
||||
# else:
|
||||
# time, level, module, message = "", "OTHER", "UNKNOWN", line
|
||||
# raw_line = line
|
||||
|
||||
# self.update_filters(level, module)
|
||||
# log_entry = {"raw": raw_line, "time": time, "level": level, "module": module, "message": message}
|
||||
# self.log_data.append(log_entry)
|
||||
|
||||
# if self.check_filter(log_entry):
|
||||
# self.display_log(log_entry)
|
||||
|
||||
# def get_module_key(self, module_name):
|
||||
# """获取模块名称的标准化键"""
|
||||
# cleaned = module_name.strip()
|
||||
# return re.sub(r":\d+$", "", cleaned)
|
||||
|
||||
# def display_log(self, entry):
|
||||
# """在日志文本框中显示日志条目"""
|
||||
# parts = []
|
||||
# tags = []
|
||||
|
||||
# if self.column_visibility["show_time"] and entry["time"]:
|
||||
# parts.append(f"{entry['time']} ")
|
||||
# tags.append("time")
|
||||
|
||||
# if self.column_visibility["show_level"]:
|
||||
# level_tag = entry["level"] if entry["level"] in self.color_config else "default"
|
||||
# parts.append(f"{entry['level']:<8} ")
|
||||
# tags.append(level_tag)
|
||||
|
||||
# if self.column_visibility["show_module"]:
|
||||
# parts.append(f"{entry['module']} ")
|
||||
# tags.append("module")
|
||||
|
||||
# parts.append(f"- {entry['message']}\n")
|
||||
# tags.append("default")
|
||||
|
||||
# self.log_text.configure(state="normal")
|
||||
# for part, tag in zip(parts, tags):
|
||||
# self.log_text.insert("end", part, tag)
|
||||
# self.log_text.see("end")
|
||||
# self.log_text.configure(state="disabled")
|
||||
|
||||
# def refresh_logs(self):
|
||||
# """刷新日志显示,根据筛选条件重新显示日志"""
|
||||
# self.column_visibility = {
|
||||
# "show_time": self.time_check.get(),
|
||||
# "show_level": self.level_check.get(),
|
||||
# "show_module": self.module_check.get(),
|
||||
# }
|
||||
|
||||
# self.log_text.configure(state="normal")
|
||||
# self.log_text.delete("1.0", "end")
|
||||
|
||||
# filtered_logs = [entry for entry in self.log_data if self.check_filter(entry)]
|
||||
|
||||
# for entry in filtered_logs:
|
||||
# parts = []
|
||||
# tags = []
|
||||
|
||||
# if self.column_visibility["show_time"] and entry["time"]:
|
||||
# parts.append(f"{entry['time']} ")
|
||||
# tags.append("time")
|
||||
|
||||
# if self.column_visibility["show_level"]:
|
||||
# level_tag = entry["level"] if entry["level"] in self.color_config else "default"
|
||||
# parts.append(f"{entry['level']:<8} ")
|
||||
# tags.append(level_tag)
|
||||
|
||||
# if self.column_visibility["show_module"]:
|
||||
# parts.append(f"{entry['module']} ")
|
||||
# tags.append("module")
|
||||
|
||||
# parts.append(f"- {entry['message']}\n")
|
||||
# tags.append("default")
|
||||
|
||||
# for part, tag in zip(parts, tags):
|
||||
# self.log_text.insert("end", part, tag)
|
||||
|
||||
# self.log_text.see("end")
|
||||
# self.log_text.configure(state="disabled")
|
||||
|
||||
# def clear_logs(self):
|
||||
# """清空日志文本框中的内容"""
|
||||
# self.log_text.configure(state="normal")
|
||||
# self.log_text.delete("1.0", "end")
|
||||
# self.log_text.configure(state="disabled")
|
||||
|
||||
# def _on_closing(self):
|
||||
# """处理窗口关闭事件,安全清理资源"""
|
||||
# # 标记GUI已关闭
|
||||
# self.is_running = False
|
||||
|
||||
# # 停止日志进程
|
||||
# self.stop_process()
|
||||
|
||||
# # 安全清理tkinter变量
|
||||
# for attr_name in list(self.__dict__.keys()):
|
||||
# if isinstance(getattr(self, attr_name), (ctk.Variable, ctk.StringVar, ctk.IntVar, ctk.DoubleVar, ctk.BooleanVar)):
|
||||
# try:
|
||||
# var = getattr(self, attr_name)
|
||||
# var.set(None)
|
||||
# except Exception:
|
||||
# pass
|
||||
# setattr(self, attr_name, None)
|
||||
|
||||
# self.quit()
|
||||
# sys.exit(0)
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# # 启动日志查看器应用
|
||||
# app = LogViewerApp()
|
||||
# app.mainloop()
|
||||
@@ -1,342 +0,0 @@
|
||||
# import os
|
||||
# import queue
|
||||
# import sys
|
||||
# import threading
|
||||
# import time
|
||||
# from datetime import datetime
|
||||
# from typing import Dict, List
|
||||
# from typing import Optional
|
||||
|
||||
# sys.path.insert(0, sys.path[0] + "/../")
|
||||
# sys.path.insert(0, sys.path[0] + "/../")
|
||||
# from src.common.logger import get_module_logger
|
||||
|
||||
# import customtkinter as ctk
|
||||
# from dotenv import load_dotenv
|
||||
|
||||
# logger = get_module_logger("gui")
|
||||
|
||||
# # 获取当前文件的目录
|
||||
# current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
# # 获取项目根目录
|
||||
# root_dir = os.path.abspath(os.path.join(current_dir, "..", ".."))
|
||||
# sys.path.insert(0, root_dir)
|
||||
# from src.common.database import db # noqa: E402
|
||||
|
||||
# # 加载环境变量
|
||||
# if os.path.exists(os.path.join(root_dir, ".env.dev")):
|
||||
# load_dotenv(os.path.join(root_dir, ".env.dev"))
|
||||
# logger.info("成功加载开发环境配置")
|
||||
# elif os.path.exists(os.path.join(root_dir, ".env")):
|
||||
# load_dotenv(os.path.join(root_dir, ".env"))
|
||||
# logger.info("成功加载生产环境配置")
|
||||
# else:
|
||||
# logger.error("未找到环境配置文件")
|
||||
# sys.exit(1)
|
||||
|
||||
|
||||
# class ReasoningGUI:
|
||||
# def __init__(self):
|
||||
# # 记录启动时间戳,转换为Unix时间戳
|
||||
# self.start_timestamp = datetime.now().timestamp()
|
||||
# logger.info(f"程序启动时间戳: {self.start_timestamp}")
|
||||
|
||||
# # 设置主题
|
||||
# ctk.set_appearance_mode("dark")
|
||||
# ctk.set_default_color_theme("blue")
|
||||
|
||||
# # 创建主窗口
|
||||
# self.root = ctk.CTk()
|
||||
# self.root.title("麦麦推理")
|
||||
# self.root.geometry("800x600")
|
||||
# self.root.protocol("WM_DELETE_WINDOW", self._on_closing)
|
||||
|
||||
# # 存储群组数据
|
||||
# self.group_data: Dict[str, List[dict]] = {}
|
||||
|
||||
# # 创建更新队列
|
||||
# self.update_queue = queue.Queue()
|
||||
|
||||
# # 创建主框架
|
||||
# self.frame = ctk.CTkFrame(self.root)
|
||||
# self.frame.pack(pady=20, padx=20, fill="both", expand=True)
|
||||
|
||||
# # 添加标题
|
||||
# self.title = ctk.CTkLabel(self.frame, text="麦麦的脑内所想", font=("Arial", 24))
|
||||
# self.title.pack(pady=10, padx=10)
|
||||
|
||||
# # 创建左右分栏
|
||||
# self.paned = ctk.CTkFrame(self.frame)
|
||||
# self.paned.pack(fill="both", expand=True, padx=10, pady=10)
|
||||
|
||||
# # 左侧群组列表
|
||||
# self.left_frame = ctk.CTkFrame(self.paned, width=200)
|
||||
# self.left_frame.pack(side="left", fill="y", padx=5, pady=5)
|
||||
|
||||
# self.group_label = ctk.CTkLabel(self.left_frame, text="群组列表", font=("Arial", 16))
|
||||
# self.group_label.pack(pady=5)
|
||||
|
||||
# # 创建可滚动框架来容纳群组按钮
|
||||
# self.group_scroll_frame = ctk.CTkScrollableFrame(self.left_frame, width=180, height=400)
|
||||
# self.group_scroll_frame.pack(pady=5, padx=5, fill="both", expand=True)
|
||||
|
||||
# # 存储群组按钮的字典
|
||||
# self.group_buttons: Dict[str, ctk.CTkButton] = {}
|
||||
# # 当前选中的群组ID
|
||||
# self.selected_group_id: Optional[str] = None
|
||||
|
||||
# # 右侧内容显示
|
||||
# self.right_frame = ctk.CTkFrame(self.paned)
|
||||
# self.right_frame.pack(side="right", fill="both", expand=True, padx=5, pady=5)
|
||||
|
||||
# self.content_label = ctk.CTkLabel(self.right_frame, text="推理内容", font=("Arial", 16))
|
||||
# self.content_label.pack(pady=5)
|
||||
|
||||
# # 创建富文本显示框
|
||||
# self.content_text = ctk.CTkTextbox(self.right_frame, width=500, height=400)
|
||||
# self.content_text.pack(pady=5, padx=5, fill="both", expand=True)
|
||||
|
||||
# # 配置文本标签 - 只使用颜色
|
||||
# self.content_text.tag_config("timestamp", foreground="#888888") # 时间戳使用灰色
|
||||
# self.content_text.tag_config("user", foreground="#4CAF50") # 用户名使用绿色
|
||||
# self.content_text.tag_config("message", foreground="#2196F3") # 消息使用蓝色
|
||||
# self.content_text.tag_config("model", foreground="#9C27B0") # 模型名称使用紫色
|
||||
# self.content_text.tag_config("prompt", foreground="#FF9800") # prompt内容使用橙色
|
||||
# self.content_text.tag_config("reasoning", foreground="#FF9800") # 推理过程使用橙色
|
||||
# self.content_text.tag_config("response", foreground="#E91E63") # 回复使用粉色
|
||||
# self.content_text.tag_config("separator", foreground="#666666") # 分隔符使用深灰色
|
||||
|
||||
# # 底部控制栏
|
||||
# self.control_frame = ctk.CTkFrame(self.frame)
|
||||
# self.control_frame.pack(fill="x", padx=10, pady=5)
|
||||
|
||||
# self.clear_button = ctk.CTkButton(self.control_frame, text="清除显示", command=self.clear_display, width=120)
|
||||
# self.clear_button.pack(side="left", padx=5)
|
||||
|
||||
# # 添加标志,标记GUI是否已关闭
|
||||
# self.is_running = True
|
||||
|
||||
# # 启动自动更新线程
|
||||
# self.update_thread = threading.Thread(target=self._auto_update, daemon=True)
|
||||
# self.update_thread.start()
|
||||
|
||||
# # 启动GUI更新检查
|
||||
# self.root.after(100, self._process_queue)
|
||||
|
||||
# def _on_closing(self):
|
||||
# """处理窗口关闭事件"""
|
||||
# # 标记GUI已关闭,防止后台线程继续访问tkinter对象
|
||||
# self.is_running = False
|
||||
|
||||
# # 安全清理所有可能的tkinter变量
|
||||
# for attr_name in list(self.__dict__.keys()):
|
||||
# if isinstance(getattr(self, attr_name), (ctk.Variable, ctk.StringVar, ctk.IntVar, ctk.DoubleVar, ctk.BooleanVar)):
|
||||
# # 删除变量前安全地将其设置为None
|
||||
# try:
|
||||
# var = getattr(self, attr_name)
|
||||
# var.set(None)
|
||||
# except Exception:
|
||||
# pass
|
||||
# setattr(self, attr_name, None)
|
||||
|
||||
# # 退出
|
||||
# self.root.quit()
|
||||
# sys.exit(0)
|
||||
|
||||
# def _process_queue(self):
|
||||
# """处理更新队列中的任务"""
|
||||
# try:
|
||||
# while True:
|
||||
# task = self.update_queue.get_nowait()
|
||||
# if task["type"] == "update_group_list":
|
||||
# self._update_group_list_gui()
|
||||
# elif task["type"] == "update_display":
|
||||
# self._update_display_gui(task["group_id"])
|
||||
# except queue.Empty:
|
||||
# pass
|
||||
# finally:
|
||||
# # 继续检查队列,但仅在GUI仍在运行时
|
||||
# if self.is_running:
|
||||
# self.root.after(100, self._process_queue)
|
||||
|
||||
# def _update_group_list_gui(self):
|
||||
# """在主线程中更新群组列表"""
|
||||
# # 清除现有按钮
|
||||
# for button in self.group_buttons.values():
|
||||
# button.destroy()
|
||||
# self.group_buttons.clear()
|
||||
|
||||
# # 创建新的群组按钮
|
||||
# for group_id in self.group_data.keys():
|
||||
# button = ctk.CTkButton(
|
||||
# self.group_scroll_frame,
|
||||
# text=f"群号: {group_id}",
|
||||
# width=160,
|
||||
# height=30,
|
||||
# corner_radius=8,
|
||||
# command=lambda gid=group_id: self._on_group_select(gid),
|
||||
# )
|
||||
# button.pack(pady=2, padx=5)
|
||||
# self.group_buttons[group_id] = button
|
||||
|
||||
# # 如果有选中的群组,保持其高亮状态
|
||||
# if self.selected_group_id and self.selected_group_id in self.group_buttons:
|
||||
# self._highlight_selected_group(self.selected_group_id)
|
||||
|
||||
# def _on_group_select(self, group_id: str):
|
||||
# """处理群组选择事件"""
|
||||
# self._highlight_selected_group(group_id)
|
||||
# self._update_display_gui(group_id)
|
||||
|
||||
# def _highlight_selected_group(self, group_id: str):
|
||||
# """高亮显示选中的群组按钮"""
|
||||
# # 重置所有按钮的颜色
|
||||
# for gid, button in self.group_buttons.items():
|
||||
# if gid == group_id:
|
||||
# # 设置选中按钮的颜色
|
||||
# button.configure(fg_color="#1E88E5", hover_color="#1976D2")
|
||||
# else:
|
||||
# # 恢复其他按钮的默认颜色
|
||||
# button.configure(fg_color="#2B2B2B", hover_color="#404040")
|
||||
|
||||
# self.selected_group_id = group_id
|
||||
|
||||
# def _update_display_gui(self, group_id: str):
|
||||
# """在主线程中更新显示内容"""
|
||||
# if group_id in self.group_data:
|
||||
# self.content_text.delete("1.0", "end")
|
||||
# for item in self.group_data[group_id]:
|
||||
# # 时间戳
|
||||
# time_str = item["time"].strftime("%Y-%m-%d %H:%M:%S")
|
||||
# self.content_text.insert("end", f"[{time_str}]\n", "timestamp")
|
||||
|
||||
# # 用户信息
|
||||
# self.content_text.insert("end", "用户: ", "timestamp")
|
||||
# self.content_text.insert("end", f"{item.get('user', '未知')}\n", "user")
|
||||
|
||||
# # 消息内容
|
||||
# self.content_text.insert("end", "消息: ", "timestamp")
|
||||
# self.content_text.insert("end", f"{item.get('message', '')}\n", "message")
|
||||
|
||||
# # 模型信息
|
||||
# self.content_text.insert("end", "模型: ", "timestamp")
|
||||
# self.content_text.insert("end", f"{item.get('model', '')}\n", "model")
|
||||
|
||||
# # Prompt内容
|
||||
# self.content_text.insert("end", "Prompt内容:\n", "timestamp")
|
||||
# prompt_text = item.get("prompt", "")
|
||||
# if prompt_text and prompt_text.lower() != "none":
|
||||
# lines = prompt_text.split("\n")
|
||||
# for line in lines:
|
||||
# if line.strip():
|
||||
# self.content_text.insert("end", " " + line + "\n", "prompt")
|
||||
# else:
|
||||
# self.content_text.insert("end", " 无Prompt内容\n", "prompt")
|
||||
|
||||
# # 推理过程
|
||||
# self.content_text.insert("end", "推理过程:\n", "timestamp")
|
||||
# reasoning_text = item.get("reasoning", "")
|
||||
# if reasoning_text and reasoning_text.lower() != "none":
|
||||
# lines = reasoning_text.split("\n")
|
||||
# for line in lines:
|
||||
# if line.strip():
|
||||
# self.content_text.insert("end", " " + line + "\n", "reasoning")
|
||||
# else:
|
||||
# self.content_text.insert("end", " 无推理过程\n", "reasoning")
|
||||
|
||||
# # 回复内容
|
||||
# self.content_text.insert("end", "回复: ", "timestamp")
|
||||
# self.content_text.insert("end", f"{item.get('response', '')}\n", "response")
|
||||
|
||||
# # 分隔符
|
||||
# self.content_text.insert("end", f"\n{'=' * 50}\n\n", "separator")
|
||||
|
||||
# # 滚动到顶部
|
||||
# self.content_text.see("1.0")
|
||||
|
||||
# def _auto_update(self):
|
||||
# """自动更新函数"""
|
||||
# while True:
|
||||
# if not self.is_running:
|
||||
# break # 如果GUI已关闭,停止线程
|
||||
|
||||
# try:
|
||||
# # 从数据库获取最新数据,只获取启动时间之后的记录
|
||||
# query = {"time": {"$gt": self.start_timestamp}}
|
||||
# logger.debug(f"查询条件: {query}")
|
||||
|
||||
# # 先获取一条记录检查时间格式
|
||||
# sample = db.reasoning_logs.find_one()
|
||||
# if sample:
|
||||
# logger.debug(f"样本记录时间格式: {type(sample['time'])} 值: {sample['time']}")
|
||||
|
||||
# cursor = db.reasoning_logs.find(query).sort("time", -1)
|
||||
# new_data = {}
|
||||
# total_count = 0
|
||||
|
||||
# for item in cursor:
|
||||
# # 调试输出
|
||||
# if total_count == 0:
|
||||
# logger.debug(f"记录时间: {item['time']}, 类型: {type(item['time'])}")
|
||||
|
||||
# total_count += 1
|
||||
# group_id = str(item.get("group_id", "unknown"))
|
||||
# if group_id not in new_data:
|
||||
# new_data[group_id] = []
|
||||
|
||||
# # 转换时间戳为datetime对象
|
||||
# if isinstance(item["time"], (int, float)):
|
||||
# time_obj = datetime.fromtimestamp(item["time"])
|
||||
# elif isinstance(item["time"], datetime):
|
||||
# time_obj = item["time"]
|
||||
# else:
|
||||
# logger.warning(f"未知的时间格式: {type(item['time'])}")
|
||||
# time_obj = datetime.now() # 使用当前时间作为后备
|
||||
|
||||
# new_data[group_id].append(
|
||||
# {
|
||||
# "time": time_obj,
|
||||
# "user": item.get("user", "未知"),
|
||||
# "message": item.get("message", ""),
|
||||
# "model": item.get("model", "未知"),
|
||||
# "reasoning": item.get("reasoning", ""),
|
||||
# "response": item.get("response", ""),
|
||||
# "prompt": item.get("prompt", ""), # 添加prompt字段
|
||||
# }
|
||||
# )
|
||||
|
||||
# logger.info(f"从数据库加载了 {total_count} 条记录,分布在 {len(new_data)} 个群组中")
|
||||
|
||||
# # 更新数据
|
||||
# if new_data != self.group_data:
|
||||
# self.group_data = new_data
|
||||
# logger.info("数据已更新,正在刷新显示...")
|
||||
# # 将更新任务添加到队列
|
||||
# self.update_queue.put({"type": "update_group_list"})
|
||||
# if self.group_data:
|
||||
# # 如果没有选中的群组,选择最新的群组
|
||||
# if not self.selected_group_id or self.selected_group_id not in self.group_data:
|
||||
# self.selected_group_id = next(iter(self.group_data))
|
||||
# self.update_queue.put({"type": "update_display", "group_id": self.selected_group_id})
|
||||
# except Exception:
|
||||
# logger.exception("自动更新出错")
|
||||
|
||||
# # 每5秒更新一次
|
||||
# time.sleep(5)
|
||||
|
||||
# def clear_display(self):
|
||||
# """清除显示内容"""
|
||||
# self.content_text.delete("1.0", "end")
|
||||
|
||||
# def run(self):
|
||||
# """运行GUI"""
|
||||
# self.root.mainloop()
|
||||
|
||||
|
||||
# def main():
|
||||
# app = ReasoningGUI()
|
||||
# app.run()
|
||||
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# main()
|
||||
@@ -2,7 +2,7 @@ from .sub_heartflow import SubHeartflow
from .observation import ChattingObservation
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.plugins.schedule.schedule_generator import bot_schedule
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
import asyncio

@@ -2,7 +2,7 @@
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.common.database import db
from src.common.logger import get_module_logger
import traceback

@@ -2,7 +2,7 @@ from .observation import Observation, ChattingObservation
import asyncio
from src.plugins.moods.moods import MoodManager
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.config.config import global_config
import time
from src.plugins.chat.message import UserInfo
from src.plugins.chat.utils import parse_text_timestamps
@@ -44,7 +44,7 @@ def init_prompt():
    prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白"
    prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n"
    prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
    prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name},{bot_name}指的就是你。"
    prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意{bot_name}指的就是你。"
    Prompt(prompt, "sub_heartflow_prompt_before")
    prompt = ""
    # prompt += f"你现在正在做的事情是:{schedule_info}\n"

@@ -11,7 +11,7 @@ from .heart_flow.heartflow import heartflow
from .plugins.memory_system.Hippocampus import HippocampusManager
from .plugins.chat.messagesender import message_manager
from .plugins.storage.storage import MessageStorage
from .plugins.config.config import global_config
from .config.config import global_config
from .plugins.chat.bot import chat_bot
from .common.logger import get_module_logger
from .plugins.remote import heartbeat_thread # noqa: F401

@@ -1,7 +1,7 @@
from typing import Tuple
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
from src.individuality.individuality import Individuality

@@ -4,7 +4,7 @@ import traceback
from typing import Optional, Dict, Any, List
from src.common.logger import get_module_logger
from ..message.message_base import UserInfo
from ..config.config import global_config
from ...config.config import global_config
from .chat_states import NotificationManager, create_new_message_notification, create_cold_chat_notification
from .message_storage import MongoDBMessageStorage


@@ -9,7 +9,7 @@ from ..chat.chat_stream import ChatStream
from ..message.message_base import UserInfo, Seg
from ..chat.message import Message
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from src.plugins.chat.message import MessageSending
from ..message.api import global_api
from ..storage.storage import MessageStorage

@@ -2,7 +2,7 @@ from typing import List, Tuple
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from ..chat.message import Message

logger = get_module_logger("knowledge_fetcher")

@@ -3,7 +3,7 @@ import datetime
from typing import Tuple
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from .chat_observer import ChatObserver
from ..message.message_base import UserInfo


@@ -1,7 +1,7 @@
from typing import Tuple
from src.common.logger import get_module_logger
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from .chat_observer import ChatObserver
from .reply_checker import ReplyChecker
from src.individuality.individuality import Individuality

@@ -2,7 +2,7 @@ from src.common.logger import get_module_logger
from .chat_observer import ChatObserver
from .conversation_info import ConversationInfo
from src.individuality.individuality import Individuality
from ..config.config import global_config
from ...config.config import global_config
import time
import asyncio


@@ -1,5 +1,5 @@
from ..moods.moods import MoodManager # 导入情绪管理器
from ..config.config import global_config
from ...config.config import global_config
from .message import MessageRecv
from ..PFC.pfc_manager import PFCManager
from .chat_stream import chat_manager

@@ -10,7 +10,7 @@ from PIL import Image
import io

from ...common.database import db
from ..config.config import global_config
from ...config.config import global_config
from ..chat.utils import get_embedding
from ..chat.utils_image import ImageManager, image_path_to_base64
from ..models.utils_model import LLMRequest

@@ -3,13 +3,13 @@ from src.common.logger import get_module_logger
import asyncio
from dataclasses import dataclass, field
from .message import MessageRecv
from ..message.message_base import BaseMessageInfo, GroupInfo
from ..message.message_base import BaseMessageInfo, GroupInfo, Seg
import hashlib
from typing import Dict
from collections import OrderedDict
import random
import time
from ..config.config import global_config
from ...config.config import global_config

logger = get_module_logger("message_buffer")

@@ -130,22 +130,40 @@ class MessageBuffer:
        keep_msgs = OrderedDict()
        combined_text = []
        found = False
        type = "text"
        type = "seglist"
        is_update = True
        for msg_id, msg in self.buffer_pool[person_id_].items():
            if msg_id == message.message_info.message_id:
                found = True
                if msg.message.message_segment.type != "seglist":
                    type = msg.message.message_segment.type
                else:
                    if (
                        isinstance(msg.message.message_segment.data, list)
                        and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
                        and len(msg.message.message_segment.data) == 1
                    ):
                        type = msg.message.message_segment.data[0].type
                combined_text.append(msg.message.processed_plain_text)
                continue
            if found:
                keep_msgs[msg_id] = msg
            elif msg.result == "F":
                # 收集F消息的文本内容
                F_type = "seglist"
                if msg.message.message_segment.type != "seglist":
                    F_type = msg.message.message_segment.type
                else:
                    if (
                        isinstance(msg.message.message_segment.data, list)
                        and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
                        and len(msg.message.message_segment.data) == 1
                    ):
                        F_type = msg.message.message_segment.data[0].type
                if hasattr(msg.message, "processed_plain_text") and msg.message.processed_plain_text:
                    if msg.message.message_segment.type == "text":
                    if F_type == "text":
                        combined_text.append(msg.message.processed_plain_text)
                    elif msg.message.message_segment.type != "text":
                    elif F_type != "text":
                        is_update = False
            elif msg.result == "U":
                logger.debug(f"异常未处理信息id: {msg.message.message_info.message_id}")

@@ -8,7 +8,7 @@ from ..message.api import global_api
from .message import MessageSending, MessageThinking, MessageSet

from ..storage.storage import MessageStorage
from ..config.config import global_config
from ...config.config import global_config
from .utils import truncate_message, calculate_typing_time, count_messages_between

from src.common.logger import LogConfig, SENDER_STYLE_CONFIG

@@ -10,7 +10,7 @@ from src.common.logger import get_module_logger

from ..models.utils_model import LLMRequest
from ..utils.typo_generator import ChineseTypoGenerator
from ..config.config import global_config
from ...config.config import global_config
from .message import MessageRecv, Message
from ..message.message_base import UserInfo
from .chat_stream import ChatStream
@@ -338,11 +338,21 @@ def random_remove_punctuation(text: str) -> str:


def process_llm_response(text: str) -> List[str]:
    # 先保护颜文字
    protected_text, kaomoji_mapping = protect_kaomoji(text)
    logger.debug(f"保护颜文字后的文本: {protected_text}")
    # 提取被 () 或 [] 包裹的内容
    pattern = re.compile(r"[(\[].*?[\)\]")
    _extracted_contents = pattern.findall(text)
    pattern = re.compile(r"[\(\[\(].*?[\)\]\)]")
    # _extracted_contents = pattern.findall(text)
    _extracted_contents = pattern.findall(protected_text) # 在保护后的文本上查找

    # 去除 () 和 [] 及其包裹的内容
    cleaned_text = pattern.sub("", text)
    # cleaned_text = pattern.sub("", text)
    cleaned_text = pattern.sub("", protected_text)

    if cleaned_text == "":
        return ["呃呃"]

    logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")

    # 对清理后的文本进行进一步处理
@@ -382,6 +392,8 @@ def process_llm_response(text: str) -> List[str]:
        return [f"{global_config.BOT_NICKNAME}不知道哦"]

    # sentences.extend(extracted_contents)
    # 在所有句子处理完毕后,对包含占位符的列表进行恢复
    sentences = recover_kaomoji(sentences, kaomoji_mapping)

    return sentences

@@ -508,8 +520,7 @@ def protect_kaomoji(sentence):
        r"]"
        r")"
        r"|"
        r"([▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15"
        r"}"
        r"([▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15})"
    )

    kaomoji_matches = kaomoji_pattern.findall(sentence)
@@ -706,12 +717,30 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
    # normal模式: 直接转换所有时间戳
    if mode == "normal":
        result_text = text

        # 将时间戳转换为可读格式并记录相同格式的时间戳
        timestamp_readable_map = {}
        readable_time_used = set()

        for match in matches:
            timestamp = float(match.group(1))
            readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
            # 由于替换会改变文本长度,需要使用正则替换而非直接替换
            pattern_instance = re.escape(match.group(0))
            result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
            timestamp_readable_map[match.group(0)] = (timestamp, readable_time)

        # 按时间戳排序
        sorted_timestamps = sorted(timestamp_readable_map.items(), key=lambda x: x[1][0])

        # 执行替换,相同格式的只保留最早的
        for ts_str, (_, readable) in sorted_timestamps:
            pattern_instance = re.escape(ts_str)
            if readable in readable_time_used:
                # 如果这个可读时间已经使用过,替换为空字符串
                result_text = re.sub(pattern_instance, "", result_text, count=1)
            else:
                # 否则替换为可读时间并记录
                result_text = re.sub(pattern_instance, readable, result_text, count=1)
                readable_time_used.add(readable)

        return result_text
    else:
        # lite模式: 按5秒间隔划分并选择性转换
@@ -770,15 +799,30 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
                pattern_instance = re.escape(match.group(0))
                result_text = re.sub(pattern_instance, "", result_text, count=1)

        # 按照时间戳原始顺序排序,避免替换时位置错误
        to_convert.sort(key=lambda x: x[1].start())
        # 按照时间戳升序排序
        to_convert.sort(key=lambda x: x[0])

        # 将时间戳转换为可读时间并记录哪些可读时间已经使用过
        converted_timestamps = []
        readable_time_used = set()

        # 执行替换
        # 由于替换会改变文本长度,从后向前替换
        to_convert.reverse()
        for ts, match in to_convert:
            readable_time = translate_timestamp_to_human_readable(ts, "relative")
            converted_timestamps.append((ts, match, readable_time))

        # 按照时间戳原始顺序排序,避免替换时位置错误
        converted_timestamps.sort(key=lambda x: x[1].start())

        # 从后向前替换,避免位置改变
        converted_timestamps.reverse()
        for match, readable_time in converted_timestamps:
            pattern_instance = re.escape(match.group(0))
            if readable_time in readable_time_used:
                # 如果相同格式的时间已存在,替换为空字符串
                result_text = re.sub(pattern_instance, "", result_text, count=1)
            else:
                # 否则替换为可读时间并记录
                result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
                readable_time_used.add(readable_time)

        return result_text

@@ -8,7 +8,7 @@ import io


from ...common.database import db
from ..config.config import global_config
from ...config.config import global_config
from ..models.utils_model import LLMRequest

from src.common.logger import get_module_logger

@@ -1,7 +1,7 @@
from src.common.logger import get_module_logger
from src.plugins.chat.message import MessageRecv
from src.plugins.storage.storage import MessageStorage
from src.plugins.config.config import global_config
from src.config.config import global_config
from datetime import datetime

logger = get_module_logger("pfc_message_processor")

@@ -4,7 +4,7 @@ import traceback
from typing import List
from ...memory_system.Hippocampus import HippocampusManager
from ...moods.moods import MoodManager
from ...config.config import global_config
from ....config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .reasoning_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
@@ -192,11 +192,21 @@ class ReasoningChat:
        if not buffer_result:
            await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
            willing_manager.delete(message.message_info.message_id)
            if message.message_segment.type == "text":
            F_type = "seglist"
            if message.message_segment.type != "seglist":
                F_type = message.message_segment.type
            else:
                if (
                    isinstance(message.message_segment.data, list)
                    and all(isinstance(x, Seg) for x in message.message_segment.data)
                    and len(message.message_segment.data) == 1
                ):
                    F_type = message.message_segment.data[0].type
            if F_type == "text":
                logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
            elif message.message_segment.type == "image":
            elif F_type == "image":
                logger.info("触发缓冲,已炸飞表情包/图片")
            elif message.message_segment.type == "seglist":
            elif F_type == "seglist":
                logger.info("触发缓冲,已炸飞消息列")
            return


@@ -2,7 +2,7 @@ from typing import List, Optional, Tuple, Union
import random

from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ....config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response

@@ -9,7 +9,7 @@ from ...moods.moods import MoodManager
from ....individuality.individuality import Individuality
from ...memory_system.Hippocampus import HippocampusManager
from ...schedule.schedule_generator import bot_schedule
from ...config.config import global_config
from ....config.config import global_config
from ...person_info.relationship_manager import relationship_manager
from src.common.logger import get_module_logger
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager

@@ -4,7 +4,7 @@ import traceback
from typing import List
from ...memory_system.Hippocampus import HippocampusManager
from ...moods.moods import MoodManager
from ...config.config import global_config
from ....config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .think_flow_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
@@ -204,11 +204,21 @@ class ThinkFlowChat:
        if not buffer_result:
            await willing_manager.bombing_buffer_message_handle(message.message_info.message_id)
            willing_manager.delete(message.message_info.message_id)
            if message.message_segment.type == "text":
            F_type = "seglist"
            if message.message_segment.type != "seglist":
                F_type = message.message_segment.type
            else:
                if (
                    isinstance(message.message_segment.data, list)
                    and all(isinstance(x, Seg) for x in message.message_segment.data)
                    and len(message.message_segment.data) == 1
                ):
                    F_type = message.message_segment.data[0].type
            if F_type == "text":
                logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
            elif message.message_segment.type == "image":
            elif F_type == "image":
                logger.info("触发缓冲,已炸飞表情包/图片")
            elif message.message_segment.type == "seglist":
            elif F_type == "seglist":
                logger.info("触发缓冲,已炸飞消息列")
            return


@@ -3,7 +3,7 @@ import random


from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ....config.config import global_config
from ...chat.message import MessageRecv
from .think_flow_prompt_builder import prompt_builder
from ...chat.utils import process_llm_response

@@ -1,7 +1,7 @@
import random
from typing import Optional

from ...config.config import global_config
from ....config.config import global_config
from ...chat.utils import get_recent_group_detailed_plain_text
from ...chat.chat_stream import chat_manager
from src.common.logger import get_module_logger

@@ -1,59 +0,0 @@
import os
from pathlib import Path
from dotenv import load_dotenv


class EnvConfig:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(EnvConfig, cls).__new__(cls)
            cls._instance._initialized = False
        return cls._instance

    def __init__(self):
        if self._initialized:
            return

        self._initialized = True
        self.ROOT_DIR = Path(__file__).parent.parent.parent.parent
        self.load_env()

    def load_env(self):
        env_file = self.ROOT_DIR / ".env"
        if env_file.exists():
            load_dotenv(env_file)

        # 根据ENVIRONMENT变量加载对应的环境文件
        env_type = os.getenv("ENVIRONMENT", "prod")
        if env_type == "dev":
            env_file = self.ROOT_DIR / ".env.dev"
        elif env_type == "prod":
            env_file = self.ROOT_DIR / ".env"

        if env_file.exists():
            load_dotenv(env_file, override=True)

    def get(self, key, default=None):
        return os.getenv(key, default)

    def get_all(self):
        return dict(os.environ)

    def __getattr__(self, name):
        return self.get(name)


# 创建全局实例
env_config = EnvConfig()


# 导出环境变量
def get_env(key, default=None):
    return os.getenv(key, default)


# 导出所有环境变量
def get_all_env():
    return dict(os.environ)
@@ -7,7 +7,7 @@ import os
# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
from src.plugins.memory_system.Hippocampus import HippocampusManager
from src.plugins.config.config import global_config
from src.config.config import global_config


async def test_memory_system():

@@ -11,7 +11,7 @@ from PIL import Image
import io
import os
from ...common.database import db
from ..config.config import global_config
from ...config.config import global_config

logger = get_module_logger("model_utils")


@@ -3,7 +3,7 @@ import threading
import time
from dataclasses import dataclass

from ..config.config import global_config
from ...config.config import global_config
from src.common.logger import get_module_logger, LogConfig, MOOD_STYLE_CONFIG
from ..person_info.relationship_manager import relationship_manager
from src.individuality.individuality import Individuality

@@ -7,7 +7,7 @@ import datetime
import asyncio
import numpy as np
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.individuality.individuality import Individuality

import matplotlib
@@ -354,7 +354,7 @@ class PersonInfoManager:
        """启动个人信息推断,每天根据一定条件推断一次"""
        try:
            while 1:
                await asyncio.sleep(60)
                await asyncio.sleep(600)
                current_time = datetime.datetime.now()
                logger.info(f"个人信息推断启动: {current_time.strftime('%Y-%m-%d %H:%M:%S')}")


@@ -1,111 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# from .questionnaire import PERSONALITY_QUESTIONS, FACTOR_DESCRIPTIONS
|
||||
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import random
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
project_root = current_dir.parent.parent.parent
|
||||
env_path = project_root / ".env"
|
||||
|
||||
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
sys.path.append(root_path)
|
||||
|
||||
from src.plugins.personality.questionnaire import PERSONALITY_QUESTIONS, FACTOR_DESCRIPTIONS # noqa: E402
|
||||
|
||||
|
||||
class BigFiveTest:
|
||||
def __init__(self):
|
||||
self.questions = PERSONALITY_QUESTIONS
|
||||
self.factors = FACTOR_DESCRIPTIONS
|
||||
|
||||
def run_test(self):
|
||||
"""运行测试并收集答案"""
|
||||
print("\n欢迎参加中国大五人格测试!")
|
||||
print("\n本测试采用六级评分,请根据每个描述与您的符合程度进行打分:")
|
||||
print("1 = 完全不符合")
|
||||
print("2 = 比较不符合")
|
||||
print("3 = 有点不符合")
|
||||
print("4 = 有点符合")
|
||||
print("5 = 比较符合")
|
||||
print("6 = 完全符合")
|
||||
print("\n请认真阅读每个描述,选择最符合您实际情况的选项。\n")
|
||||
|
||||
# 创建题目序号到题目的映射
|
||||
questions_map = {q["id"]: q for q in self.questions}
|
||||
|
||||
# 获取所有题目ID并随机打乱顺序
|
||||
question_ids = list(questions_map.keys())
|
||||
random.shuffle(question_ids)
|
||||
|
||||
answers = {}
|
||||
total_questions = len(question_ids)
|
||||
|
||||
for i, question_id in enumerate(question_ids, 1):
|
||||
question = questions_map[question_id]
|
||||
while True:
|
||||
try:
|
||||
print(f"\n[{i}/{total_questions}] {question['content']}")
|
||||
score = int(input("您的评分(1-6): "))
|
||||
if 1 <= score <= 6:
|
||||
answers[question_id] = score
|
||||
break
|
||||
else:
|
||||
print("请输入1-6之间的数字!")
|
||||
except ValueError:
|
||||
print("请输入有效的数字!")
|
||||
|
||||
return self.calculate_scores(answers)
|
||||
|
||||
def calculate_scores(self, answers):
|
||||
"""计算各维度得分"""
|
||||
results = {}
|
||||
factor_questions = {"外向性": [], "神经质": [], "严谨性": [], "开放性": [], "宜人性": []}
|
||||
|
||||
# 将题目按因子分类
|
||||
for q in self.questions:
|
||||
factor_questions[q["factor"]].append(q)
|
||||
|
||||
# 计算每个维度的得分
|
||||
for factor, questions in factor_questions.items():
|
||||
total_score = 0
|
||||
for q in questions:
|
||||
score = answers[q["id"]]
|
||||
# 处理反向计分题目
|
||||
if q["reverse_scoring"]:
|
||||
score = 7 - score # 6分量表反向计分为7减原始分
|
||||
total_score += score
|
||||
|
||||
# 计算平均分
|
||||
avg_score = round(total_score / len(questions), 2)
|
||||
results[factor] = {"得分": avg_score, "题目数": len(questions), "总分": total_score}
|
||||
|
||||
return results
|
||||
|
||||
def get_factor_description(self, factor):
|
||||
"""获取因子的详细描述"""
|
||||
return self.factors[factor]
|
||||
|
||||
|
||||
def main():
|
||||
test = BigFiveTest()
|
||||
results = test.run_test()
|
||||
|
||||
print("\n测试结果:")
|
||||
print("=" * 50)
|
||||
for factor, data in results.items():
|
||||
print(f"\n{factor}:")
|
||||
print(f"平均分: {data['得分']} (总分: {data['总分']}, 题目数: {data['题目数']})")
|
||||
print("-" * 30)
|
||||
description = test.get_factor_description(factor)
|
||||
print("维度说明:", description["description"][:100] + "...")
|
||||
print("\n特征词:", ", ".join(description["trait_words"]))
|
||||
print("=" * 50)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -1,353 +0,0 @@
|
||||
"""
|
||||
基于聊天记录的人格特征分析系统
|
||||
"""
|
||||
|
||||
from typing import Dict, List
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
import sys
|
||||
import random
|
||||
from collections import defaultdict
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from datetime import datetime
|
||||
import matplotlib.font_manager as fm
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
project_root = current_dir.parent.parent.parent
|
||||
env_path = project_root / ".env"
|
||||
|
||||
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
sys.path.append(root_path)
|
||||
|
||||
from src.plugins.personality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402
|
||||
from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402
|
||||
from src.plugins.personality.offline_llm import LLMModel # noqa: E402
|
||||
from src.plugins.personality.who_r_u import MessageAnalyzer # noqa: E402
|
||||
|
||||
# 加载环境变量
|
||||
if env_path.exists():
|
||||
print(f"从 {env_path} 加载环境变量")
|
||||
load_dotenv(env_path)
|
||||
else:
|
||||
print(f"未找到环境变量文件: {env_path}")
|
||||
print("将使用默认配置")
|
||||
|
||||
|
||||
class ChatBasedPersonalityEvaluator:
|
||||
def __init__(self):
|
||||
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
|
||||
self.scenarios = []
|
||||
self.message_analyzer = MessageAnalyzer()
|
||||
self.llm = LLMModel()
|
||||
self.trait_scores_history = defaultdict(list) # 记录每个特质的得分历史
|
||||
|
||||
# 为每个人格特质获取对应的场景
|
||||
for trait in PERSONALITY_SCENES:
|
||||
scenes = get_scene_by_factor(trait)
|
||||
if not scenes:
|
||||
continue
|
||||
scene_keys = list(scenes.keys())
|
||||
selected_scenes = random.sample(scene_keys, min(3, len(scene_keys)))
|
||||
|
||||
for scene_key in selected_scenes:
|
||||
scene = scenes[scene_key]
|
||||
other_traits = [t for t in PERSONALITY_SCENES if t != trait]
|
||||
secondary_trait = random.choice(other_traits)
|
||||
self.scenarios.append(
|
||||
{"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
|
||||
)
|
||||
|
||||
def analyze_chat_context(self, messages: List[Dict]) -> str:
|
||||
"""
|
||||
分析一组消息的上下文,生成场景描述
|
||||
"""
|
||||
context = ""
|
||||
for msg in messages:
|
||||
nickname = msg.get("user_info", {}).get("user_nickname", "未知用户")
|
||||
content = msg.get("processed_plain_text", msg.get("detailed_plain_text", ""))
|
||||
if content:
|
||||
context += f"{nickname}: {content}\n"
|
||||
return context
|
||||
|
||||
def evaluate_chat_response(
|
||||
self, user_nickname: str, chat_context: str, dimensions: List[str] = None
|
||||
) -> Dict[str, float]:
|
||||
"""
|
||||
评估聊天内容在各个人格维度上的得分
|
||||
"""
|
||||
# 使用所有维度进行评估
|
||||
dimensions = list(self.personality_traits.keys())
|
||||
|
||||
dimension_descriptions = []
|
||||
for dim in dimensions:
|
||||
desc = FACTOR_DESCRIPTIONS.get(dim, "")
|
||||
if desc:
|
||||
dimension_descriptions.append(f"- {dim}:{desc}")
|
||||
|
||||
dimensions_text = "\n".join(dimension_descriptions)
|
||||
|
||||
prompt = f"""请根据以下聊天记录,评估"{user_nickname}"在大五人格模型中的维度得分(1-6分)。
|
||||
|
||||
聊天记录:
|
||||
{chat_context}
|
||||
|
||||
需要评估的维度说明:
|
||||
{dimensions_text}
|
||||
|
||||
请按照以下格式输出评估结果,注意,你的评价对象是"{user_nickname}"(仅输出JSON格式):
|
||||
{{
|
||||
"开放性": 分数,
|
||||
"严谨性": 分数,
|
||||
"外向性": 分数,
|
||||
"宜人性": 分数,
|
||||
"神经质": 分数
|
||||
}}
|
||||
|
||||
评分标准:
|
||||
1 = 非常不符合该维度特征
|
||||
2 = 比较不符合该维度特征
|
||||
3 = 有点不符合该维度特征
|
||||
4 = 有点符合该维度特征
|
||||
5 = 比较符合该维度特征
|
||||
6 = 非常符合该维度特征
|
||||
|
||||
如果你觉得某个维度没有相关信息或者无法判断,请输出0分
|
||||
|
||||
请根据聊天记录的内容和语气,结合维度说明进行评分。如果维度可以评分,确保分数在1-6之间。如果没有体现,请输出0分"""
|
||||
|
||||
try:
|
||||
ai_response, _ = self.llm.generate_response(prompt)
|
||||
start_idx = ai_response.find("{")
|
||||
end_idx = ai_response.rfind("}") + 1
|
||||
if start_idx != -1 and end_idx != 0:
|
||||
json_str = ai_response[start_idx:end_idx]
|
||||
scores = json.loads(json_str)
|
||||
return {k: max(0, min(6, float(v))) for k, v in scores.items()}
|
||||
else:
|
||||
print("AI响应格式不正确,使用默认评分")
|
||||
return {dim: 0 for dim in dimensions}
|
||||
except Exception as e:
|
||||
print(f"评估过程出错:{str(e)}")
|
||||
return {dim: 0 for dim in dimensions}
|
||||
|
||||
def evaluate_user_personality(self, qq_id: str, num_samples: int = 10, context_length: int = 5) -> Dict:
|
||||
"""
|
||||
基于用户的聊天记录评估人格特征
|
||||
|
||||
Args:
|
||||
qq_id (str): 用户QQ号
|
||||
num_samples (int): 要分析的聊天片段数量
|
||||
context_length (int): 每个聊天片段的上下文长度
|
||||
|
||||
Returns:
|
||||
Dict: 评估结果
|
||||
"""
|
||||
# 获取用户的随机消息及其上下文
|
||||
chat_contexts, user_nickname = self.message_analyzer.get_user_random_contexts(
|
||||
qq_id, num_messages=num_samples, context_length=context_length
|
||||
)
|
||||
if not chat_contexts:
|
||||
return {"error": f"没有找到QQ号 {qq_id} 的消息记录"}
|
||||
|
||||
# 初始化评分
|
||||
final_scores = defaultdict(float)
|
||||
dimension_counts = defaultdict(int)
|
||||
chat_samples = []
|
||||
|
||||
# 清空历史记录
|
||||
self.trait_scores_history.clear()
|
||||
|
||||
# 分析每个聊天上下文
|
||||
for chat_context in chat_contexts:
|
||||
# 评估这段聊天内容的所有维度
|
||||
scores = self.evaluate_chat_response(user_nickname, chat_context)
|
||||
|
||||
# 记录样本
|
||||
chat_samples.append(
|
||||
{"聊天内容": chat_context, "评估维度": list(self.personality_traits.keys()), "评分": scores}
|
||||
)
|
||||
|
||||
# 更新总分和历史记录
|
||||
for dimension, score in scores.items():
|
||||
if score > 0: # 只统计大于0的有效分数
|
||||
final_scores[dimension] += score
|
||||
dimension_counts[dimension] += 1
|
||||
self.trait_scores_history[dimension].append(score)
|
||||
|
||||
# 计算平均分
|
||||
average_scores = {}
|
||||
for dimension in self.personality_traits:
|
||||
if dimension_counts[dimension] > 0:
|
||||
average_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
|
||||
else:
|
||||
average_scores[dimension] = 0 # 如果没有有效分数,返回0
|
||||
|
||||
# 生成趋势图
|
||||
self._generate_trend_plot(qq_id, user_nickname)
|
||||
|
||||
result = {
|
||||
"用户QQ": qq_id,
|
||||
"用户昵称": user_nickname,
|
||||
"样本数量": len(chat_samples),
|
||||
"人格特征评分": average_scores,
|
||||
"维度评估次数": dict(dimension_counts),
|
||||
"详细样本": chat_samples,
|
||||
"特质得分历史": {k: v for k, v in self.trait_scores_history.items()},
|
||||
}
|
||||
|
||||
# 保存结果
|
||||
os.makedirs("results", exist_ok=True)
|
||||
result_file = f"results/personality_result_{qq_id}.json"
|
||||
with open(result_file, "w", encoding="utf-8") as f:
|
||||
json.dump(result, f, ensure_ascii=False, indent=2)
|
||||
|
||||
return result
|
||||
|
||||
def _generate_trend_plot(self, qq_id: str, user_nickname: str):
|
||||
"""
|
||||
生成人格特质累计平均分变化趋势图
|
||||
"""
|
||||
# 查找系统中可用的中文字体
|
||||
chinese_fonts = []
|
||||
for f in fm.fontManager.ttflist:
|
||||
try:
|
||||
if "简" in f.name or "SC" in f.name or "黑" in f.name or "宋" in f.name or "微软" in f.name:
|
||||
chinese_fonts.append(f.name)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if chinese_fonts:
|
||||
plt.rcParams["font.sans-serif"] = chinese_fonts + ["SimHei", "Microsoft YaHei", "Arial Unicode MS"]
|
||||
else:
|
||||
# 如果没有找到中文字体,使用默认字体,并将中文昵称转换为拼音或英文
|
||||
try:
|
||||
from pypinyin import lazy_pinyin
|
||||
|
||||
user_nickname = "".join(lazy_pinyin(user_nickname))
|
||||
except ImportError:
|
||||
user_nickname = "User" # 如果无法转换为拼音,使用默认英文
|
||||
|
||||
plt.rcParams["axes.unicode_minus"] = False # 解决负号显示问题
|
||||
|
||||
plt.figure(figsize=(12, 6))
|
||||
plt.style.use("bmh") # 使用内置的bmh样式,它有类似seaborn的美观效果
|
||||
|
||||
colors = {
|
||||
"开放性": "#FF9999",
|
||||
"严谨性": "#66B2FF",
|
||||
"外向性": "#99FF99",
|
||||
"宜人性": "#FFCC99",
|
||||
"神经质": "#FF99CC",
|
||||
}
|
||||
|
||||
# 计算每个维度在每个时间点的累计平均分
|
||||
cumulative_averages = {}
|
||||
for trait, scores in self.trait_scores_history.items():
|
||||
if not scores:
|
||||
continue
|
||||
|
||||
averages = []
|
||||
total = 0
|
||||
valid_count = 0
|
||||
for score in scores:
|
||||
if score > 0: # 只计算大于0的有效分数
|
||||
total += score
|
||||
valid_count += 1
|
||||
if valid_count > 0:
|
||||
averages.append(total / valid_count)
|
||||
else:
|
||||
# 如果当前分数无效,使用前一个有效的平均分
|
||||
if averages:
|
||||
averages.append(averages[-1])
|
||||
else:
|
||||
continue # 跳过无效分数
|
||||
|
||||
if averages: # 只有在有有效分数的情况下才添加到累计平均中
|
||||
cumulative_averages[trait] = averages
|
||||
|
||||
# 绘制每个维度的累计平均分变化趋势
|
||||
for trait, averages in cumulative_averages.items():
|
||||
x = range(1, len(averages) + 1)
|
||||
plt.plot(x, averages, "o-", label=trait, color=colors.get(trait), linewidth=2, markersize=8)
|
||||
|
||||
# 添加趋势线
|
||||
z = np.polyfit(x, averages, 1)
|
||||
p = np.poly1d(z)
|
||||
plt.plot(x, p(x), "--", color=colors.get(trait), alpha=0.5)
|
||||
|
||||
plt.title(f"{user_nickname} 的人格特质累计平均分变化趋势", fontsize=14, pad=20)
|
||||
plt.xlabel("评估次数", fontsize=12)
|
||||
plt.ylabel("累计平均分", fontsize=12)
|
||||
plt.grid(True, linestyle="--", alpha=0.7)
|
||||
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5))
|
||||
plt.ylim(0, 7)
|
||||
plt.tight_layout()
|
||||
|
||||
# 保存图表
|
||||
os.makedirs("results/plots", exist_ok=True)
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
plot_file = f"results/plots/personality_trend_{qq_id}_{timestamp}.png"
|
||||
plt.savefig(plot_file, dpi=300, bbox_inches="tight")
|
||||
plt.close()
|
||||
|
||||
|
||||
def analyze_user_personality(qq_id: str, num_samples: int = 10, context_length: int = 5) -> str:
|
||||
"""
|
||||
分析用户人格特征的便捷函数
|
||||
|
||||
Args:
|
||||
qq_id (str): 用户QQ号
|
||||
num_samples (int): 要分析的聊天片段数量
|
||||
context_length (int): 每个聊天片段的上下文长度
|
||||
|
||||
Returns:
|
||||
str: 格式化的分析结果
|
||||
"""
|
||||
evaluator = ChatBasedPersonalityEvaluator()
|
||||
result = evaluator.evaluate_user_personality(qq_id, num_samples, context_length)
|
||||
|
||||
if "error" in result:
|
||||
return result["error"]
|
||||
|
||||
# 格式化输出
|
||||
output = f"QQ号 {qq_id} ({result['用户昵称']}) 的人格特征分析结果:\n"
|
||||
output += "=" * 50 + "\n\n"
|
||||
|
||||
output += "人格特征评分:\n"
|
||||
for trait, score in result["人格特征评分"].items():
|
||||
if score == 0:
|
||||
output += f"{trait}: 数据不足,无法判断 (评估次数: {result['维度评估次数'].get(trait, 0)})\n"
|
||||
else:
|
||||
output += f"{trait}: {score}/6 (评估次数: {result['维度评估次数'].get(trait, 0)})\n"
|
||||
|
||||
# 添加变化趋势描述
|
||||
if trait in result["特质得分历史"] and len(result["特质得分历史"][trait]) > 1:
|
||||
scores = [s for s in result["特质得分历史"][trait] if s != 0] # 过滤掉无效分数
|
||||
if len(scores) > 1: # 确保有足够的有效分数计算趋势
|
||||
trend = np.polyfit(range(len(scores)), scores, 1)[0]
|
||||
if abs(trend) < 0.1:
|
||||
trend_desc = "保持稳定"
|
||||
elif trend > 0:
|
||||
trend_desc = "呈上升趋势"
|
||||
else:
|
||||
trend_desc = "呈下降趋势"
|
||||
output += f" 变化趋势: {trend_desc} (斜率: {trend:.2f})\n"
|
||||
|
||||
output += f"\n分析样本数量:{result['样本数量']}\n"
|
||||
output += f"结果已保存至:results/personality_result_{qq_id}.json\n"
|
||||
output += "变化趋势图已保存至:results/plots/目录\n"
|
||||
|
||||
return output
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# 测试代码
|
||||
# test_qq = "" # 替换为要测试的QQ号
|
||||
# print(analyze_user_personality(test_qq, num_samples=30, context_length=20))
|
||||
# test_qq = ""
|
||||
# print(analyze_user_personality(test_qq, num_samples=30, context_length=20))
|
||||
test_qq = "1026294844"
|
||||
print(analyze_user_personality(test_qq, num_samples=30, context_length=30))
|
||||
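# --- Illustrative sketch (not part of this commit) ----------------------------
# How the "变化趋势" label in the file above is derived: fit a degree-1
# polynomial to the valid scores and classify by the slope, with |slope| < 0.1
# treated as stable. The score list here is a made-up example.
import numpy as np

scores = [3.0, 3.5, 4.0, 4.5]
slope = np.polyfit(range(len(scores)), scores, 1)[0]
if abs(slope) < 0.1:
    trend_desc = "保持稳定"
elif slope > 0:
    trend_desc = "呈上升趋势"
else:
    trend_desc = "呈下降趋势"
print(trend_desc, round(slope, 2))  # 呈上升趋势 0.5
# ------------------------------------------------------------------------------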
@@ -1,349 +0,0 @@
|
||||
from typing import Dict
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
import sys
|
||||
from datetime import datetime
|
||||
import random
|
||||
from scipy import stats # 添加scipy导入用于t检验
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
project_root = current_dir.parent.parent.parent
|
||||
env_path = project_root / ".env"
|
||||
|
||||
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
sys.path.append(root_path)
|
||||
|
||||
from src.plugins.personality.big5_test import BigFiveTest # noqa: E402
|
||||
from src.plugins.personality.renqingziji import PersonalityEvaluator_direct # noqa: E402
|
||||
from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS, PERSONALITY_QUESTIONS # noqa: E402
|
||||
|
||||
|
||||
class CombinedPersonalityTest:
|
||||
def __init__(self):
|
||||
self.big5_test = BigFiveTest()
|
||||
self.scenario_test = PersonalityEvaluator_direct()
|
||||
self.dimensions = ["开放性", "严谨性", "外向性", "宜人性", "神经质"]
|
||||
|
||||
def run_combined_test(self):
|
||||
"""运行组合测试"""
|
||||
print("\n=== 人格特征综合评估系统 ===")
|
||||
print("\n本测试将通过两种方式评估人格特征:")
|
||||
print("1. 传统问卷测评(约40题)")
|
||||
print("2. 情景反应测评(15个场景)")
|
||||
print("\n两种测评完成后,将对比分析结果的异同。")
|
||||
input("\n准备好开始第一部分(问卷测评)了吗?按回车继续...")
|
||||
|
||||
# 运行问卷测试
|
||||
print("\n=== 第一部分:问卷测评 ===")
|
||||
print("本部分采用六级评分,请根据每个描述与您的符合程度进行打分:")
|
||||
print("1 = 完全不符合")
|
||||
print("2 = 比较不符合")
|
||||
print("3 = 有点不符合")
|
||||
print("4 = 有点符合")
|
||||
print("5 = 比较符合")
|
||||
print("6 = 完全符合")
|
||||
print("\n重要提示:您可以选择以下两种方式之一来回答问题:")
|
||||
print("1. 根据您自身的真实情况来回答")
|
||||
print("2. 根据您想要扮演的角色特征来回答")
|
||||
print("\n无论选择哪种方式,请保持一致并认真回答每个问题。")
|
||||
input("\n按回车开始答题...")
|
||||
|
||||
questionnaire_results = self.run_questionnaire()
|
||||
|
||||
# 转换问卷结果格式以便比较
|
||||
questionnaire_scores = {factor: data["得分"] for factor, data in questionnaire_results.items()}
|
||||
|
||||
# 运行情景测试
|
||||
print("\n=== 第二部分:情景反应测评 ===")
|
||||
print("接下来,您将面对一系列具体场景,请描述您在每个场景中可能的反应。")
|
||||
print("每个场景都会评估不同的人格维度,共15个场景。")
|
||||
print("您可以选择提供自己的真实反应,也可以选择扮演一个您创作的角色来回答。")
|
||||
input("\n准备好开始了吗?按回车继续...")
|
||||
|
||||
scenario_results = self.run_scenario_test()
|
||||
|
||||
# 比较和展示结果
|
||||
self.compare_and_display_results(questionnaire_scores, scenario_results)
|
||||
|
||||
# 保存结果
|
||||
self.save_results(questionnaire_scores, scenario_results)
|
||||
|
||||
def run_questionnaire(self):
|
||||
"""运行问卷测试部分"""
|
||||
# 创建题目序号到题目的映射
|
||||
questions_map = {q["id"]: q for q in PERSONALITY_QUESTIONS}
|
||||
|
||||
# 获取所有题目ID并随机打乱顺序
|
||||
question_ids = list(questions_map.keys())
|
||||
random.shuffle(question_ids)
|
||||
|
||||
answers = {}
|
||||
total_questions = len(question_ids)
|
||||
|
||||
for i, question_id in enumerate(question_ids, 1):
|
||||
question = questions_map[question_id]
|
||||
while True:
|
||||
try:
|
||||
print(f"\n问题 [{i}/{total_questions}]")
|
||||
print(f"{question['content']}")
|
||||
score = int(input("您的评分(1-6): "))
|
||||
if 1 <= score <= 6:
|
||||
answers[question_id] = score
|
||||
break
|
||||
else:
|
||||
print("请输入1-6之间的数字!")
|
||||
except ValueError:
|
||||
print("请输入有效的数字!")
|
||||
|
||||
# 每10题显示一次进度
|
||||
if i % 10 == 0:
|
||||
print(f"\n已完成 {i}/{total_questions} 题 ({int(i / total_questions * 100)}%)")
|
||||
|
||||
return self.calculate_questionnaire_scores(answers)
|
||||
|
||||
def calculate_questionnaire_scores(self, answers):
|
||||
"""计算问卷测试的维度得分"""
|
||||
results = {}
|
||||
factor_questions = {"外向性": [], "神经质": [], "严谨性": [], "开放性": [], "宜人性": []}
|
||||
|
||||
# 将题目按因子分类
|
||||
for q in PERSONALITY_QUESTIONS:
|
||||
factor_questions[q["factor"]].append(q)
|
||||
|
||||
# 计算每个维度的得分
|
||||
for factor, questions in factor_questions.items():
|
||||
total_score = 0
|
||||
for q in questions:
|
||||
score = answers[q["id"]]
|
||||
# 处理反向计分题目
|
||||
if q["reverse_scoring"]:
|
||||
score = 7 - score # 6分量表反向计分为7减原始分
|
||||
total_score += score
|
||||
|
||||
# 计算平均分
|
||||
avg_score = round(total_score / len(questions), 2)
|
||||
results[factor] = {"得分": avg_score, "题目数": len(questions), "总分": total_score}
|
||||
|
||||
return results
|
||||
|
||||
def run_scenario_test(self):
|
||||
"""运行情景测试部分"""
|
||||
final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
|
||||
dimension_counts = {trait: 0 for trait in final_scores.keys()}
|
||||
|
||||
# 随机打乱场景顺序
|
||||
scenarios = self.scenario_test.scenarios.copy()
|
||||
random.shuffle(scenarios)
|
||||
|
||||
for i, scenario_data in enumerate(scenarios, 1):
|
||||
print(f"\n场景 [{i}/{len(scenarios)}] - {scenario_data['场景编号']}")
|
||||
print("-" * 50)
|
||||
print(scenario_data["场景"])
|
||||
print("\n请描述您在这种情况下会如何反应:")
|
||||
response = input().strip()
|
||||
|
||||
if not response:
|
||||
print("反应描述不能为空!")
|
||||
continue
|
||||
|
||||
print("\n正在评估您的描述...")
|
||||
scores = self.scenario_test.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"])
|
||||
|
||||
# 更新分数
|
||||
for dimension, score in scores.items():
|
||||
final_scores[dimension] += score
|
||||
dimension_counts[dimension] += 1
|
||||
|
||||
# print("\n当前场景评估结果:")
|
||||
# print("-" * 30)
|
||||
# for dimension, score in scores.items():
|
||||
# print(f"{dimension}: {score}/6")
|
||||
|
||||
# 每5个场景显示一次总进度
|
||||
if i % 5 == 0:
|
||||
print(f"\n已完成 {i}/{len(scenarios)} 个场景 ({int(i / len(scenarios) * 100)}%)")
|
||||
|
||||
if i < len(scenarios):
|
||||
input("\n按回车继续下一个场景...")
|
||||
|
||||
# 计算平均分
|
||||
for dimension in final_scores:
|
||||
if dimension_counts[dimension] > 0:
|
||||
final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
|
||||
|
||||
return final_scores
|
||||
|
||||
def compare_and_display_results(self, questionnaire_scores: Dict, scenario_scores: Dict):
|
||||
"""比较和展示两种测试的结果"""
|
||||
print("\n=== 测评结果对比分析 ===")
|
||||
print("\n" + "=" * 60)
|
||||
print(f"{'维度':<8} {'问卷得分':>10} {'情景得分':>10} {'差异':>10} {'差异程度':>10}")
|
||||
print("-" * 60)
|
||||
|
||||
# 收集每个维度的得分用于统计分析
|
||||
questionnaire_values = []
|
||||
scenario_values = []
|
||||
diffs = []
|
||||
|
||||
for dimension in self.dimensions:
|
||||
q_score = questionnaire_scores[dimension]
|
||||
s_score = scenario_scores[dimension]
|
||||
diff = round(abs(q_score - s_score), 2)
|
||||
|
||||
questionnaire_values.append(q_score)
|
||||
scenario_values.append(s_score)
|
||||
diffs.append(diff)
|
||||
|
||||
# 计算差异程度
|
||||
diff_level = "低" if diff < 0.5 else "中" if diff < 1.0 else "高"
|
||||
print(f"{dimension:<8} {q_score:>10.2f} {s_score:>10.2f} {diff:>10.2f} {diff_level:>10}")
|
||||
|
||||
print("=" * 60)
|
||||
|
||||
# 计算整体统计指标
|
||||
mean_diff = sum(diffs) / len(diffs)
|
||||
std_diff = (sum((x - mean_diff) ** 2 for x in diffs) / (len(diffs) - 1)) ** 0.5
|
||||
|
||||
# 计算效应量 (Cohen's d)
|
||||
pooled_std = (
|
||||
(
|
||||
sum((x - sum(questionnaire_values) / len(questionnaire_values)) ** 2 for x in questionnaire_values)
|
||||
+ sum((x - sum(scenario_values) / len(scenario_values)) ** 2 for x in scenario_values)
|
||||
)
|
||||
/ (2 * len(self.dimensions) - 2)
|
||||
) ** 0.5
|
||||
|
||||
if pooled_std != 0:
|
||||
cohens_d = abs(mean_diff / pooled_std)
|
||||
|
||||
# 解释效应量
|
||||
if cohens_d < 0.2:
|
||||
effect_size = "微小"
|
||||
elif cohens_d < 0.5:
|
||||
effect_size = "小"
|
||||
elif cohens_d < 0.8:
|
||||
effect_size = "中等"
|
||||
else:
|
||||
effect_size = "大"
|
||||
|
||||
# 对所有维度进行整体t检验
|
||||
t_stat, p_value = stats.ttest_rel(questionnaire_values, scenario_values)
|
||||
print("\n整体统计分析:")
|
||||
print(f"平均差异: {mean_diff:.3f}")
|
||||
print(f"差异标准差: {std_diff:.3f}")
|
||||
print(f"效应量(Cohen's d): {cohens_d:.3f}")
|
||||
print(f"效应量大小: {effect_size}")
|
||||
print(f"t统计量: {t_stat:.3f}")
|
||||
print(f"p值: {p_value:.3f}")
|
||||
|
||||
if p_value < 0.05:
|
||||
print("结论: 两种测评方法的结果存在显著差异 (p < 0.05)")
|
||||
else:
|
||||
print("结论: 两种测评方法的结果无显著差异 (p >= 0.05)")
|
||||
|
||||
print("\n维度说明:")
|
||||
for dimension in self.dimensions:
|
||||
print(f"\n{dimension}:")
|
||||
desc = FACTOR_DESCRIPTIONS[dimension]
|
||||
print(f"定义:{desc['description']}")
|
||||
print(f"特征词:{', '.join(desc['trait_words'])}")
|
||||
|
||||
# 分析显著差异
|
||||
significant_diffs = []
|
||||
for dimension in self.dimensions:
|
||||
diff = abs(questionnaire_scores[dimension] - scenario_scores[dimension])
|
||||
if diff >= 1.0: # 差异大于等于1分视为显著
|
||||
significant_diffs.append(
|
||||
{
|
||||
"dimension": dimension,
|
||||
"diff": diff,
|
||||
"questionnaire": questionnaire_scores[dimension],
|
||||
"scenario": scenario_scores[dimension],
|
||||
}
|
||||
)
|
||||
|
||||
if significant_diffs:
|
||||
print("\n\n显著差异分析:")
|
||||
print("-" * 40)
|
||||
for diff in significant_diffs:
|
||||
print(f"\n{diff['dimension']}维度的测评结果存在显著差异:")
|
||||
print(f"问卷得分:{diff['questionnaire']:.2f}")
|
||||
print(f"情景得分:{diff['scenario']:.2f}")
|
||||
print(f"差异值:{diff['diff']:.2f}")
|
||||
|
||||
# 分析可能的原因
|
||||
if diff["questionnaire"] > diff["scenario"]:
|
||||
print("可能原因:在问卷中的自我评价较高,但在具体情景中的表现较为保守。")
|
||||
else:
|
||||
print("可能原因:在具体情景中表现出更多该维度特征,而在问卷自评时较为保守。")
|
||||
|
||||
def save_results(self, questionnaire_scores: Dict, scenario_scores: Dict):
|
||||
"""保存测试结果"""
|
||||
results = {
|
||||
"测试时间": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
|
||||
"问卷测评结果": questionnaire_scores,
|
||||
"情景测评结果": scenario_scores,
|
||||
"维度说明": FACTOR_DESCRIPTIONS,
|
||||
}
|
||||
|
||||
# 确保目录存在
|
||||
os.makedirs("results", exist_ok=True)
|
||||
|
||||
# 生成带时间戳的文件名
|
||||
filename = f"results/personality_combined_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
|
||||
|
||||
# 保存到文件
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
json.dump(results, f, ensure_ascii=False, indent=2)
|
||||
|
||||
print(f"\n完整的测评结果已保存到:{filename}")
|
||||
|
||||
|
||||
def load_existing_results():
|
||||
"""检查并加载已有的测试结果"""
|
||||
results_dir = "results"
|
||||
if not os.path.exists(results_dir):
|
||||
return None
|
||||
|
||||
# 获取所有personality_combined开头的文件
|
||||
result_files = [f for f in os.listdir(results_dir) if f.startswith("personality_combined_") and f.endswith(".json")]
|
||||
|
||||
if not result_files:
|
||||
return None
|
||||
|
||||
# 按文件修改时间排序,获取最新的结果文件
|
||||
latest_file = max(result_files, key=lambda f: os.path.getmtime(os.path.join(results_dir, f)))
|
||||
|
||||
print(f"\n发现已有的测试结果:{latest_file}")
|
||||
try:
|
||||
with open(os.path.join(results_dir, latest_file), "r", encoding="utf-8") as f:
|
||||
results = json.load(f)
|
||||
return results
|
||||
except Exception as e:
|
||||
print(f"读取结果文件时出错:{str(e)}")
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
test = CombinedPersonalityTest()
|
||||
|
||||
# 检查是否存在已有结果
|
||||
existing_results = load_existing_results()
|
||||
|
||||
if existing_results:
|
||||
print("\n=== 使用已有测试结果进行分析 ===")
|
||||
print(f"测试时间:{existing_results['测试时间']}")
|
||||
|
||||
questionnaire_scores = existing_results["问卷测评结果"]
|
||||
scenario_scores = existing_results["情景测评结果"]
|
||||
|
||||
# 直接进行结果对比分析
|
||||
test.compare_and_display_results(questionnaire_scores, scenario_scores)
|
||||
else:
|
||||
print("\n未找到已有的测试结果,开始新的测试...")
|
||||
test.run_combined_test()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
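# --- Illustrative sketch (not part of this commit) ----------------------------
# The comparison step in the combined test above reduces to a mean absolute
# difference plus a paired t-test over the five dimension scores. Standalone
# version with made-up numbers; scipy.stats.ttest_rel is the same call used in
# compare_and_display_results.
from scipy import stats

questionnaire = [4.2, 3.1, 5.0, 4.4, 2.8]
scenario = [3.8, 3.4, 4.2, 4.6, 3.0]
diffs = [abs(q - s) for q, s in zip(questionnaire, scenario)]
mean_diff = sum(diffs) / len(diffs)
t_stat, p_value = stats.ttest_rel(questionnaire, scenario)
print(f"mean |diff|={mean_diff:.3f}, t={t_stat:.3f}, p={p_value:.3f}")
# ------------------------------------------------------------------------------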
@@ -1,123 +0,0 @@
import asyncio
import os
import time
from typing import Tuple, Union

import aiohttp
import requests
from src.common.logger import get_module_logger

logger = get_module_logger("offline_llm")


class LLMModel:
    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")

        if not self.api_key or not self.base_url:
            raise ValueError("环境变量未正确加载:SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")

        logger.info(f"API URL: {self.base_url}")  # 使用 logger 记录 base_url

    def generate_response(self, prompt: str) -> Union[str, Tuple[str, str]]:
        """根据输入的提示生成模型的响应"""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # 构建请求体
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,
        }

        # 发送请求到完整的 chat/completions 端点
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # 记录请求的 URL

        max_retries = 3
        base_wait_time = 15  # 基础等待时间(秒)

        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)

                if response.status_code == 429:
                    wait_time = base_wait_time * (2**retry)  # 指数退避
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue

                response.raise_for_status()  # 检查其他响应状态

                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0]["message"]["content"]
                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                    return content, reasoning_content
                return "没有返回结果", ""

            except Exception as e:
                if retry < max_retries - 1:  # 如果还有重试机会
                    wait_time = base_wait_time * (2**retry)
                    logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logger.error(f"请求失败: {str(e)}")
                    return f"请求失败: {str(e)}", ""

        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

    async def generate_response_async(self, prompt: str) -> Union[str, Tuple[str, str]]:
        """异步方式根据输入的提示生成模型的响应"""
        headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}

        # 构建请求体
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params,
        }

        # 发送请求到完整的 chat/completions 端点
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # 记录请求的 URL

        max_retries = 3
        base_wait_time = 15

        async with aiohttp.ClientSession() as session:
            for retry in range(max_retries):
                try:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2**retry)  # 指数退避
                            logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue

                        response.raise_for_status()  # 检查其他响应状态

                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "没有返回结果", ""

                except Exception as e:
                    if retry < max_retries - 1:  # 如果还有重试机会
                        wait_time = base_wait_time * (2**retry)
                        logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"请求失败: {str(e)}")
                        return f"请求失败: {str(e)}", ""

        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""
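# --- Illustrative sketch (not part of this commit) ----------------------------
# Wait times produced by the retry loop above follow base_wait_time * 2**retry,
# i.e. 15 s, 30 s and 60 s for the three attempts before giving up. The print
# below just enumerates those values; it does not call the API.
base_wait_time = 15
max_retries = 3
print([base_wait_time * (2**retry) for retry in range(max_retries)])  # [15, 30, 60]
# ------------------------------------------------------------------------------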
@@ -1,142 +0,0 @@
|
||||
# 人格测试问卷题目
|
||||
# 王孟成, 戴晓阳, & 姚树桥. (2011).
|
||||
# 中国大五人格问卷的初步编制Ⅲ:简式版的制定及信效度检验. 中国临床心理学杂志, 19(04), Article 04.
|
||||
|
||||
# 王孟成, 戴晓阳, & 姚树桥. (2010).
|
||||
# 中国大五人格问卷的初步编制Ⅰ:理论框架与信度分析. 中国临床心理学杂志, 18(05), Article 05.
|
||||
|
||||
PERSONALITY_QUESTIONS = [
|
||||
# 神经质维度 (F1)
|
||||
{"id": 1, "content": "我常担心有什么不好的事情要发生", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 2, "content": "我常感到害怕", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 3, "content": "有时我觉得自己一无是处", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 4, "content": "我很少感到忧郁或沮丧", "factor": "神经质", "reverse_scoring": True},
|
||||
{"id": 5, "content": "别人一句漫不经心的话,我常会联系在自己身上", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 6, "content": "在面对压力时,我有种快要崩溃的感觉", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 7, "content": "我常担忧一些无关紧要的事情", "factor": "神经质", "reverse_scoring": False},
|
||||
{"id": 8, "content": "我常常感到内心不踏实", "factor": "神经质", "reverse_scoring": False},
|
||||
# 严谨性维度 (F2)
|
||||
{"id": 9, "content": "在工作上,我常只求能应付过去便可", "factor": "严谨性", "reverse_scoring": True},
|
||||
{"id": 10, "content": "一旦确定了目标,我会坚持努力地实现它", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 11, "content": "我常常是仔细考虑之后才做出决定", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 12, "content": "别人认为我是个慎重的人", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 13, "content": "做事讲究逻辑和条理是我的一个特点", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 14, "content": "我喜欢一开头就把事情计划好", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 15, "content": "我工作或学习很勤奋", "factor": "严谨性", "reverse_scoring": False},
|
||||
{"id": 16, "content": "我是个倾尽全力做事的人", "factor": "严谨性", "reverse_scoring": False},
|
||||
# 宜人性维度 (F3)
|
||||
{
|
||||
"id": 17,
|
||||
"content": "尽管人类社会存在着一些阴暗的东西(如战争、罪恶、欺诈),我仍然相信人性总的来说是善良的",
|
||||
"factor": "宜人性",
|
||||
"reverse_scoring": False,
|
||||
},
|
||||
{"id": 18, "content": "我觉得大部分人基本上是心怀善意的", "factor": "宜人性", "reverse_scoring": False},
|
||||
{"id": 19, "content": "虽然社会上有骗子,但我觉得大部分人还是可信的", "factor": "宜人性", "reverse_scoring": False},
|
||||
{"id": 20, "content": "我不太关心别人是否受到不公正的待遇", "factor": "宜人性", "reverse_scoring": True},
|
||||
{"id": 21, "content": "我时常觉得别人的痛苦与我无关", "factor": "宜人性", "reverse_scoring": True},
|
||||
{"id": 22, "content": "我常为那些遭遇不幸的人感到难过", "factor": "宜人性", "reverse_scoring": False},
|
||||
{"id": 23, "content": "我是那种只照顾好自己,不替别人担忧的人", "factor": "宜人性", "reverse_scoring": True},
|
||||
{"id": 24, "content": "当别人向我诉说不幸时,我常感到难过", "factor": "宜人性", "reverse_scoring": False},
|
||||
# 开放性维度 (F4)
|
||||
{"id": 25, "content": "我的想象力相当丰富", "factor": "开放性", "reverse_scoring": False},
|
||||
{"id": 26, "content": "我头脑中经常充满生动的画面", "factor": "开放性", "reverse_scoring": False},
|
||||
{"id": 27, "content": "我对许多事情有着很强的好奇心", "factor": "开放性", "reverse_scoring": False},
|
||||
{"id": 28, "content": "我喜欢冒险", "factor": "开放性", "reverse_scoring": False},
|
||||
{"id": 29, "content": "我是个勇于冒险,突破常规的人", "factor": "开放性", "reverse_scoring": False},
|
||||
{"id": 30, "content": "我身上具有别人没有的冒险精神", "factor": "开放性", "reverse_scoring": False},
|
||||
{
|
||||
"id": 31,
|
||||
"content": "我渴望学习一些新东西,即使它们与我的日常生活无关",
|
||||
"factor": "开放性",
|
||||
"reverse_scoring": False,
|
||||
},
|
||||
{
|
||||
"id": 32,
|
||||
"content": "我很愿意也很容易接受那些新事物、新观点、新想法",
|
||||
"factor": "开放性",
|
||||
"reverse_scoring": False,
|
||||
},
|
||||
# 外向性维度 (F5)
|
||||
{"id": 33, "content": "我喜欢参加社交与娱乐聚会", "factor": "外向性", "reverse_scoring": False},
|
||||
{"id": 34, "content": "我对人多的聚会感到乏味", "factor": "外向性", "reverse_scoring": True},
|
||||
{"id": 35, "content": "我尽量避免参加人多的聚会和嘈杂的环境", "factor": "外向性", "reverse_scoring": True},
|
||||
{"id": 36, "content": "在热闹的聚会上,我常常表现主动并尽情玩耍", "factor": "外向性", "reverse_scoring": False},
|
||||
{"id": 37, "content": "有我在的场合一般不会冷场", "factor": "外向性", "reverse_scoring": False},
|
||||
{"id": 38, "content": "我希望成为领导者而不是被领导者", "factor": "外向性", "reverse_scoring": False},
|
||||
{"id": 39, "content": "在一个团体中,我希望处于领导地位", "factor": "外向性", "reverse_scoring": False},
|
||||
{"id": 40, "content": "别人多认为我是一个热情和友好的人", "factor": "外向性", "reverse_scoring": False},
|
||||
]
|
||||
|
||||
# 因子维度说明
|
||||
FACTOR_DESCRIPTIONS = {
|
||||
"外向性": {
|
||||
"description": "反映个体神经系统的强弱和动力特征。外向性主要表现为个体在人际交往和社交活动中的倾向性,"
|
||||
"包括对社交活动的兴趣、"
|
||||
"对人群的态度、社交互动中的主动程度以及在群体中的影响力。高分者倾向于积极参与社交活动,乐于与人交往,善于表达自我,"
|
||||
"并往往在群体中发挥领导作用;低分者则倾向于独处,不喜欢热闹的社交场合,表现出内向、安静的特征。",
|
||||
"trait_words": ["热情", "活力", "社交", "主动"],
|
||||
"subfactors": {
|
||||
"合群性": "个体愿意与他人聚在一起,即接近人群的倾向;高分表现乐群、好交际,低分表现封闭、独处",
|
||||
"热情": "个体对待别人时所表现出的态度;高分表现热情好客,低分表现冷淡",
|
||||
"支配性": "个体喜欢指使、操纵他人,倾向于领导别人的特点;高分表现好强、发号施令,低分表现顺从、低调",
|
||||
"活跃": "个体精力充沛,活跃、主动性等特点;高分表现活跃,低分表现安静",
|
||||
},
|
||||
},
|
||||
"神经质": {
|
||||
"description": "反映个体情绪的状态和体验内心苦恼的倾向性。这个维度主要关注个体在面对压力、"
|
||||
"挫折和日常生活挑战时的情绪稳定性和适应能力。它包含了对焦虑、抑郁、愤怒等负面情绪的敏感程度,"
|
||||
"以及个体对这些情绪的调节和控制能力。高分者容易体验负面情绪,对压力较为敏感,情绪波动较大;"
|
||||
"低分者则表现出较强的情绪稳定性,能够较好地应对压力和挫折。",
|
||||
"trait_words": ["稳定", "沉着", "从容", "坚韧"],
|
||||
"subfactors": {
|
||||
"焦虑": "个体体验焦虑感的个体差异;高分表现坐立不安,低分表现平静",
|
||||
"抑郁": "个体体验抑郁情感的个体差异;高分表现郁郁寡欢,低分表现平静",
|
||||
"敏感多疑": "个体常常关注自己的内心活动,行为和过于意识人对自己的看法、评价;高分表现敏感多疑,"
|
||||
"低分表现淡定、自信",
|
||||
"脆弱性": "个体在危机或困难面前无力、脆弱的特点;高分表现无能、易受伤、逃避,低分表现坚强",
|
||||
"愤怒-敌意": "个体准备体验愤怒,及相关情绪的状态;高分表现暴躁易怒,低分表现平静",
|
||||
},
|
||||
},
|
||||
"严谨性": {
|
||||
"description": "反映个体在目标导向行为上的组织、坚持和动机特征。这个维度体现了个体在工作、"
|
||||
"学习等目标性活动中的自我约束和行为管理能力。它涉及到个体的责任感、自律性、计划性、条理性以及完成任务的态度。"
|
||||
"高分者往往表现出强烈的责任心、良好的组织能力、谨慎的决策风格和持续的努力精神;低分者则可能表现出随意性强、"
|
||||
"缺乏规划、做事马虎或易放弃的特点。",
|
||||
"trait_words": ["负责", "自律", "条理", "勤奋"],
|
||||
"subfactors": {
|
||||
"责任心": "个体对待任务和他人认真负责,以及对自己承诺的信守;高分表现有责任心、负责任,"
|
||||
"低分表现推卸责任、逃避处罚",
|
||||
"自我控制": "个体约束自己的能力,及自始至终的坚持性;高分表现自制、有毅力,低分表现冲动、无毅力",
|
||||
"审慎性": "个体在采取具体行动前的心理状态;高分表现谨慎、小心,低分表现鲁莽、草率",
|
||||
"条理性": "个体处理事务和工作的秩序,条理和逻辑性;高分表现整洁、有秩序,低分表现混乱、遗漏",
|
||||
"勤奋": "个体工作和学习的努力程度及为达到目标而表现出的进取精神;高分表现勤奋、刻苦,低分表现懒散",
|
||||
},
|
||||
},
|
||||
"开放性": {
|
||||
"description": "反映个体对新异事物、新观念和新经验的接受程度,以及在思维和行为方面的创新倾向。"
|
||||
"这个维度体现了个体在认知和体验方面的广度、深度和灵活性。它包括对艺术的欣赏能力、对知识的求知欲、想象力的丰富程度,"
|
||||
"以及对冒险和创新的态度。高分者往往具有丰富的想象力、广泛的兴趣、开放的思维方式和创新的倾向;低分者则倾向于保守、"
|
||||
"传统,喜欢熟悉和常规的事物。",
|
||||
"trait_words": ["创新", "好奇", "艺术", "冒险"],
|
||||
"subfactors": {
|
||||
"幻想": "个体富于幻想和想象的水平;高分表现想象力丰富,低分表现想象力匮乏",
|
||||
"审美": "个体对于艺术和美的敏感与热爱程度;高分表现富有艺术气息,低分表现一般对艺术不敏感",
|
||||
"好奇心": "个体对未知事物的态度;高分表现兴趣广泛、好奇心浓,低分表现兴趣少、无好奇心",
|
||||
"冒险精神": "个体愿意尝试有风险活动的个体差异;高分表现好冒险,低分表现保守",
|
||||
"价值观念": "个体对新事物、新观念、怪异想法的态度;高分表现开放、坦然接受新事物,低分则相反",
|
||||
},
|
||||
},
|
||||
"宜人性": {
|
||||
"description": "反映个体在人际关系中的亲和倾向,体现了对他人的关心、同情和合作意愿。"
|
||||
"这个维度主要关注个体与他人互动时的态度和行为特征,包括对他人的信任程度、同理心水平、"
|
||||
"助人意愿以及在人际冲突中的处理方式。高分者通常表现出友善、富有同情心、乐于助人的特质,善于与他人建立和谐关系;"
|
||||
"低分者则可能表现出较少的人际关注,在社交互动中更注重自身利益,较少考虑他人感受。",
|
||||
"trait_words": ["友善", "同理", "信任", "合作"],
|
||||
"subfactors": {
|
||||
"信任": "个体对他人和/或他人言论的相信程度;高分表现信任他人,低分表现怀疑",
|
||||
"体贴": "个体对别人的兴趣和需要的关注程度;高分表现体贴、温存,低分表现冷漠、不在乎",
|
||||
"同情": "个体对处于不利地位的人或物的态度;高分表现富有同情心,低分表现冷漠",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -1,195 +0,0 @@
|
||||
"""
|
||||
The definition of artificial personality in this paper follows the dispositional para-digm and adapts a definition of
|
||||
personality developed for humans [17]:
|
||||
Personality for a human is the "whole and organisation of relatively stable tendencies and patterns of experience and
|
||||
behaviour within one person (distinguishing it from other persons)". This definition is modified for artificial
|
||||
personality:
|
||||
Artificial personality describes the relatively stable tendencies and patterns of behav-iour of an AI-based machine that
|
||||
can be designed by developers and designers via different modalities, such as language, creating the impression
|
||||
of individuality of a humanized social agent when users interact with the machine."""
|
||||
|
||||
from typing import Dict, List
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
import sys
|
||||
|
||||
"""
|
||||
第一种方案:基于情景评估的人格测定
|
||||
"""
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
project_root = current_dir.parent.parent.parent
|
||||
env_path = project_root / ".env"
|
||||
|
||||
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
sys.path.append(root_path)
|
||||
|
||||
from src.plugins.personality.scene import get_scene_by_factor, PERSONALITY_SCENES # noqa: E402
|
||||
from src.plugins.personality.questionnaire import FACTOR_DESCRIPTIONS # noqa: E402
|
||||
from src.plugins.personality.offline_llm import LLMModel # noqa: E402
|
||||
|
||||
# 加载环境变量
|
||||
if env_path.exists():
|
||||
print(f"从 {env_path} 加载环境变量")
|
||||
load_dotenv(env_path)
|
||||
else:
|
||||
print(f"未找到环境变量文件: {env_path}")
|
||||
print("将使用默认配置")
|
||||
|
||||
|
||||
class PersonalityEvaluatorDirect:
|
||||
def __init__(self):
|
||||
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
|
||||
self.scenarios = []
|
||||
|
||||
# 为每个人格特质获取对应的场景
|
||||
for trait in PERSONALITY_SCENES:
|
||||
scenes = get_scene_by_factor(trait)
|
||||
if not scenes:
|
||||
continue
|
||||
|
||||
# 从每个维度选择3个场景
|
||||
import random
|
||||
|
||||
scene_keys = list(scenes.keys())
|
||||
selected_scenes = random.sample(scene_keys, min(3, len(scene_keys)))
|
||||
|
||||
for scene_key in selected_scenes:
|
||||
scene = scenes[scene_key]
|
||||
|
||||
# 为每个场景添加评估维度
|
||||
# 主维度是当前特质,次维度随机选择一个其他特质
|
||||
other_traits = [t for t in PERSONALITY_SCENES if t != trait]
|
||||
secondary_trait = random.choice(other_traits)
|
||||
|
||||
self.scenarios.append(
|
||||
{"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
|
||||
)
|
||||
|
||||
self.llm = LLMModel()
|
||||
|
||||
def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
|
||||
"""
|
||||
使用 DeepSeek AI 评估用户对特定场景的反应
|
||||
"""
|
||||
# 构建维度描述
|
||||
dimension_descriptions = []
|
||||
for dim in dimensions:
|
||||
desc = FACTOR_DESCRIPTIONS.get(dim, "")
|
||||
if desc:
|
||||
dimension_descriptions.append(f"- {dim}:{desc}")
|
||||
|
||||
dimensions_text = "\n".join(dimension_descriptions)
|
||||
|
||||
prompt = f"""请根据以下场景和用户描述,评估用户在大五人格模型中的相关维度得分(1-6分)。
|
||||
|
||||
场景描述:
|
||||
{scenario}
|
||||
|
||||
用户回应:
|
||||
{response}
|
||||
|
||||
需要评估的维度说明:
|
||||
{dimensions_text}
|
||||
|
||||
请按照以下格式输出评估结果(仅输出JSON格式):
|
||||
{{
|
||||
"{dimensions[0]}": 分数,
|
||||
"{dimensions[1]}": 分数
|
||||
}}
|
||||
|
||||
评分标准:
|
||||
1 = 非常不符合该维度特征
|
||||
2 = 比较不符合该维度特征
|
||||
3 = 有点不符合该维度特征
|
||||
4 = 有点符合该维度特征
|
||||
5 = 比较符合该维度特征
|
||||
6 = 非常符合该维度特征
|
||||
|
||||
请根据用户的回应,结合场景和维度说明进行评分。确保分数在1-6之间,并给出合理的评估。"""
|
||||
|
||||
try:
|
||||
ai_response, _ = self.llm.generate_response(prompt)
|
||||
# 尝试从AI响应中提取JSON部分
|
||||
start_idx = ai_response.find("{")
|
||||
end_idx = ai_response.rfind("}") + 1
|
||||
if start_idx != -1 and end_idx != 0:
|
||||
json_str = ai_response[start_idx:end_idx]
|
||||
scores = json.loads(json_str)
|
||||
# 确保所有分数在1-6之间
|
||||
return {k: max(1, min(6, float(v))) for k, v in scores.items()}
|
||||
else:
|
||||
print("AI响应格式不正确,使用默认评分")
|
||||
return {dim: 3.5 for dim in dimensions}
|
||||
except Exception as e:
|
||||
print(f"评估过程出错:{str(e)}")
|
||||
return {dim: 3.5 for dim in dimensions}
|
||||
|
||||
|
||||
def main():
|
||||
print("欢迎使用人格形象创建程序!")
|
||||
print("接下来,您将面对一系列场景(共15个)。请根据您想要创建的角色形象,描述在该场景下可能的反应。")
|
||||
print("每个场景都会评估不同的人格维度,最终得出完整的人格特征评估。")
|
||||
print("评分标准:1=非常不符合,2=比较不符合,3=有点不符合,4=有点符合,5=比较符合,6=非常符合")
|
||||
print("\n准备好了吗?按回车键开始...")
|
||||
input()
|
||||
|
||||
evaluator = PersonalityEvaluatorDirect()
|
||||
final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
|
||||
dimension_counts = {trait: 0 for trait in final_scores.keys()}
|
||||
|
||||
for i, scenario_data in enumerate(evaluator.scenarios, 1):
|
||||
print(f"\n场景 {i}/{len(evaluator.scenarios)} - {scenario_data['场景编号']}:")
|
||||
print("-" * 50)
|
||||
print(scenario_data["场景"])
|
||||
print("\n请描述您的角色在这种情况下会如何反应:")
|
||||
response = input().strip()
|
||||
|
||||
if not response:
|
||||
print("反应描述不能为空!")
|
||||
continue
|
||||
|
||||
print("\n正在评估您的描述...")
|
||||
scores = evaluator.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"])
|
||||
|
||||
# 更新最终分数
|
||||
for dimension, score in scores.items():
|
||||
final_scores[dimension] += score
|
||||
dimension_counts[dimension] += 1
|
||||
|
||||
print("\n当前评估结果:")
|
||||
print("-" * 30)
|
||||
for dimension, score in scores.items():
|
||||
print(f"{dimension}: {score}/6")
|
||||
|
||||
if i < len(evaluator.scenarios):
|
||||
print("\n按回车键继续下一个场景...")
|
||||
input()
|
||||
|
||||
# 计算平均分
|
||||
for dimension in final_scores:
|
||||
if dimension_counts[dimension] > 0:
|
||||
final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)
|
||||
|
||||
print("\n最终人格特征评估结果:")
|
||||
print("-" * 30)
|
||||
for trait, score in final_scores.items():
|
||||
print(f"{trait}: {score}/6")
|
||||
print(f"测试场景数:{dimension_counts[trait]}")
|
||||
|
||||
# 保存结果
|
||||
result = {"final_scores": final_scores, "dimension_counts": dimension_counts, "scenarios": evaluator.scenarios}
|
||||
|
||||
# 确保目录存在
|
||||
os.makedirs("results", exist_ok=True)
|
||||
|
||||
# 保存到文件
|
||||
with open("results/personality_result.json", "w", encoding="utf-8") as f:
|
||||
json.dump(result, f, ensure_ascii=False, indent=2)
|
||||
|
||||
print("\n结果已保存到 results/personality_result.json")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
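# --- Illustrative sketch (not part of this commit) ----------------------------
# The score-parsing step used in evaluate_response above, in isolation: grab
# the first {...} block from the model reply, parse it as JSON, and clamp every
# value into the 1-6 range. The ai_response string is a made-up example.
import json

ai_response = '评估如下:{"开放性": 5, "神经质": 0.5} 希望有帮助'
start_idx = ai_response.find("{")
end_idx = ai_response.rfind("}") + 1
scores = json.loads(ai_response[start_idx:end_idx])
print({k: max(1, min(6, float(v))) for k, v in scores.items()})  # {'开放性': 5.0, '神经质': 1}
# ------------------------------------------------------------------------------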
@@ -1,156 +0,0 @@
|
||||
import random
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import datetime
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
project_root = current_dir.parent.parent.parent
|
||||
env_path = project_root / ".env"
|
||||
|
||||
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
|
||||
sys.path.append(root_path)
|
||||
|
||||
from src.common.database import db # noqa: E402
|
||||
|
||||
|
||||
class MessageAnalyzer:
|
||||
def __init__(self):
|
||||
self.messages_collection = db["messages"]
|
||||
|
||||
def get_message_context(self, message_id: int, context_length: int = 5) -> Optional[List[Dict]]:
|
||||
"""
|
||||
获取指定消息ID的上下文消息列表
|
||||
|
||||
Args:
|
||||
message_id (int): 消息ID
|
||||
context_length (int): 上下文长度(单侧,总长度为 2*context_length + 1)
|
||||
|
||||
Returns:
|
||||
Optional[List[Dict]]: 消息列表,如果未找到则返回None
|
||||
"""
|
||||
# 从数据库获取指定消息
|
||||
target_message = self.messages_collection.find_one({"message_id": message_id})
|
||||
if not target_message:
|
||||
return None
|
||||
|
||||
# 获取该消息的stream_id
|
||||
stream_id = target_message.get("chat_info", {}).get("stream_id")
|
||||
if not stream_id:
|
||||
return None
|
||||
|
||||
# 获取同一stream_id的所有消息
|
||||
stream_messages = list(self.messages_collection.find({"chat_info.stream_id": stream_id}).sort("time", 1))
|
||||
|
||||
# 找到目标消息在列表中的位置
|
||||
target_index = None
|
||||
for i, msg in enumerate(stream_messages):
|
||||
if msg["message_id"] == message_id:
|
||||
target_index = i
|
||||
break
|
||||
|
||||
if target_index is None:
|
||||
return None
|
||||
|
||||
# 获取目标消息前后的消息
|
||||
start_index = max(0, target_index - context_length)
|
||||
end_index = min(len(stream_messages), target_index + context_length + 1)
|
||||
|
||||
return stream_messages[start_index:end_index]
|
||||
|
||||
def format_messages(self, messages: List[Dict], target_message_id: Optional[int] = None) -> str:
|
||||
"""
|
||||
格式化消息列表为可读字符串
|
||||
|
||||
Args:
|
||||
messages (List[Dict]): 消息列表
|
||||
target_message_id (Optional[int]): 目标消息ID,用于标记
|
||||
|
||||
Returns:
|
||||
str: 格式化的消息字符串
|
||||
"""
|
||||
if not messages:
|
||||
return "没有消息记录"
|
||||
|
||||
reply = ""
|
||||
for msg in messages:
|
||||
# 消息时间
|
||||
msg_time = datetime.datetime.fromtimestamp(int(msg["time"])).strftime("%Y-%m-%d %H:%M:%S")
|
||||
|
||||
# 获取消息内容
|
||||
message_text = msg.get("processed_plain_text", msg.get("detailed_plain_text", "无消息内容"))
|
||||
nickname = msg.get("user_info", {}).get("user_nickname", "未知用户")
|
||||
|
||||
# 标记当前消息
|
||||
is_target = "→ " if target_message_id and msg["message_id"] == target_message_id else " "
|
||||
|
||||
reply += f"{is_target}[{msg_time}] {nickname}: {message_text}\n"
|
||||
|
||||
if target_message_id and msg["message_id"] == target_message_id:
|
||||
reply += " " + "-" * 50 + "\n"
|
||||
|
||||
return reply
|
||||
|
||||
def get_user_random_contexts(
|
||||
self, qq_id: str, num_messages: int = 10, context_length: int = 5
|
||||
) -> tuple[List[str], str]: # noqa: E501
|
||||
"""
|
||||
获取用户的随机消息及其上下文
|
||||
|
||||
Args:
|
||||
qq_id (str): QQ号
|
||||
num_messages (int): 要获取的随机消息数量
|
||||
context_length (int): 每条消息的上下文长度(单侧)
|
||||
|
||||
Returns:
|
||||
tuple[List[str], str]: (每个消息上下文的格式化字符串列表, 用户昵称)
|
||||
"""
|
||||
if not qq_id:
|
||||
return [], ""
|
||||
|
||||
# 获取用户所有消息
|
||||
all_messages = list(self.messages_collection.find({"user_info.user_id": int(qq_id)}))
|
||||
if not all_messages:
|
||||
return [], ""
|
||||
|
||||
# 获取用户昵称
|
||||
user_nickname = all_messages[0].get("chat_info", {}).get("user_info", {}).get("user_nickname", "未知用户")
|
||||
|
||||
# 随机选择指定数量的消息
|
||||
selected_messages = random.sample(all_messages, min(num_messages, len(all_messages)))
|
||||
# 按时间排序
|
||||
selected_messages.sort(key=lambda x: int(x["time"]))
|
||||
|
||||
# 存储所有上下文消息
|
||||
context_list = []
|
||||
|
||||
# 获取每条消息的上下文
|
||||
for msg in selected_messages:
|
||||
message_id = msg["message_id"]
|
||||
|
||||
# 获取消息上下文
|
||||
context_messages = self.get_message_context(message_id, context_length)
|
||||
if context_messages:
|
||||
formatted_context = self.format_messages(context_messages, message_id)
|
||||
context_list.append(formatted_context)
|
||||
|
||||
return context_list, user_nickname
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# 测试代码
|
||||
analyzer = MessageAnalyzer()
|
||||
test_qq = "1026294844" # 替换为要测试的QQ号
|
||||
print(f"测试QQ号: {test_qq}")
|
||||
print("-" * 50)
|
||||
# 获取5条消息,每条消息前后各3条上下文
|
||||
contexts, nickname = analyzer.get_user_random_contexts(test_qq, num_messages=5, context_length=3)
|
||||
|
||||
print(f"用户昵称: {nickname}\n")
|
||||
# 打印每个上下文
|
||||
for i, context in enumerate(contexts, 1):
|
||||
print(f"\n随机消息 {i}/{len(contexts)}:")
|
||||
print("-" * 30)
|
||||
print(context)
|
||||
print("=" * 50)
|
||||
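# --- Illustrative sketch (not part of this commit) ----------------------------
# The windowing logic of get_message_context above, shown on a plain list:
# take context_length items on each side of the target index, clamped to the
# list bounds, so the window holds at most 2 * context_length + 1 messages.
# The list of integers stands in for messages sorted by time.
messages = list(range(10))
target_index = 8
context_length = 3
start_index = max(0, target_index - context_length)
end_index = min(len(messages), target_index + context_length + 1)
print(messages[start_index:end_index])  # [5, 6, 7, 8, 9]
# ------------------------------------------------------------------------------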
@@ -1 +0,0 @@
那是以后会用到的妙妙小工具.jpg
@@ -6,7 +6,7 @@ import os
import json
import threading
from src.common.logger import get_module_logger
from src.plugins.config.config import global_config
from src.config.config import global_config

logger = get_module_logger("remote")
@@ -1,4 +1,4 @@
from src.plugins.config.config import global_config
from src.config.config import global_config
from src.plugins.chat.message import MessageRecv, MessageSending, Message
from src.common.database import db
import time
@@ -12,7 +12,7 @@ sys.path.append(root_path)
from src.common.database import db  # noqa: E402
from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfig  # noqa: E402
from src.plugins.models.utils_model import LLMRequest  # noqa: E402
from src.plugins.config.config import global_config  # noqa: E402
from src.config.config import global_config  # noqa: E402

TIME_ZONE = tz.gettz(global_config.TIME_ZONE)  # 设置时区
@@ -2,7 +2,7 @@ from typing import List, Optional

from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ...config.config import global_config
from src.common.logger import get_module_logger, LogConfig, TOPIC_STYLE_CONFIG

# 定义日志配置
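# --- Illustrative note (not part of this commit) -------------------------------
# These import rewrites all appear to reflect the same move: the config module
# now lives at src/config/config.py rather than src/plugins/config/config.py.
# From a module under src/plugins/<package>/, that means one extra parent level
# in the relative form; the absolute form is the equivalent shown elsewhere in
# this diff.
from src.config.config import global_config       # absolute, works anywhere under src
# from ...config.config import global_config      # relative, only from src/plugins/<package>/
# -------------------------------------------------------------------------------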
@@ -74,9 +74,3 @@ class ClassicalWillingManager(BaseWillingManager):

    async def not_reply_handle(self, message_id):
        return await super().not_reply_handle(message_id)

    async def get_variable_parameters(self):
        return await super().get_variable_parameters()

    async def set_variable_parameters(self, parameters):
        return await super().set_variable_parameters(parameters)
@@ -234,9 +234,3 @@ class DynamicWillingManager(BaseWillingManager):

    async def after_generate_reply_handle(self, message_id):
        return await super().after_generate_reply_handle(message_id)

    async def get_variable_parameters(self):
        return await super().get_variable_parameters()

    async def set_variable_parameters(self, parameters):
        return await super().set_variable_parameters(parameters)
157
src/plugins/willing/mode_llmcheck.py
Normal file
@@ -0,0 +1,157 @@
"""
llmcheck 模式:
此模式的一些参数不会在配置文件中显示,要修改请在可变参数下修改
此模式的特点:
1.在群聊内的连续对话场景下,使用大语言模型来判断回复概率
2.非连续对话场景,使用mxp模式的意愿管理器(可另外配置)
3.默认配置的是model_v3,当前参数适用于deepseek-v3-0324

继承自其他模式,实质上仅重写get_reply_probability方法,未来可能重构成一个插件,可方便地组装到其他意愿模式上。
目前的使用方式是拓展到其他意愿管理模式

"""

import time
from loguru import logger
from ..models.utils_model import LLM_request
from ...config.config import global_config

# from ..chat.chat_stream import ChatStream
from ..chat.utils import get_recent_group_detailed_plain_text

# from .willing_manager import BaseWillingManager
from .mode_mxp import MxpWillingManager
import re
from functools import wraps


def is_continuous_chat(self, message_id: str):
    # 判断是否是连续对话,出于成本考虑,默认限制5条
    willing_info = self.ongoing_messages[message_id]
    chat_id = willing_info.chat_id
    group_info = willing_info.group_info
    config = self.global_config
    length = 5
    if chat_id:
        chat_talking_text = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True)
        if group_info:
            if str(config.BOT_QQ) in chat_talking_text:
                return True
            else:
                return False
    return False


def llmcheck_decorator(trigger_condition_func):
    def decorator(func):
        @wraps(func)
        def wrapper(self, message_id: str):
            if trigger_condition_func(self, message_id):
                # 满足条件,走llm流程
                return self.get_llmreply_probability(message_id)
            else:
                # 不满足条件,走默认流程
                return func(self, message_id)

        return wrapper

    return decorator


class LlmcheckWillingManager(MxpWillingManager):
    def __init__(self):
        super().__init__()
        self.model_v3 = LLM_request(model=global_config.llm_normal, temperature=0.3)

    async def get_llmreply_probability(self, message_id: str):
        message_info = self.ongoing_messages[message_id]
        chat_id = message_info.chat_id
        config = self.global_config
        # 获取信息的长度
        length = 5
        if message_info.group_info and config:
            if message_info.group_info.group_id not in config.talk_allowed_groups:
                reply_probability = 0
                return reply_probability

        current_date = time.strftime("%Y-%m-%d", time.localtime())
        current_time = time.strftime("%H:%M:%S", time.localtime())
        chat_talking_prompt = ""
        if chat_id:
            chat_talking_prompt = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True)
        else:
            return 0

        # if is_mentioned_bot:
        #     return 1.0
        prompt = f"""
假设你正在查看一个群聊,你在这个群聊里的网名叫{global_config.BOT_NICKNAME},你还有很多别名: {"/".join(global_config.BOT_ALIAS_NAMES)},
现在群里聊天的内容是{chat_talking_prompt},
今天是{current_date},现在是{current_time}。
综合群内的氛围和你自己之前的发言,给出你认为**最新的消息**需要你回复的概率,数值在0到1之间。请注意,群聊内容杂乱,很多时候对话连续,但很可能不是在和你说话。
如果最新的消息和你之前的发言在内容上连续,或者提到了你的名字或者称谓,将其视作明确指向你的互动,给出高于0.8的概率。如果现在是睡眠时间,直接概率为0。如果话题内容与你之前不是紧密相关,请不要给出高于0.1的概率。
请注意是判断概率,而不是编写回复内容,
仅输出在0到1区间内的概率值,不要给出你的判断依据。
"""

        content_check, reasoning_check, _ = await self.model_v3.generate_response(prompt)
        # logger.info(f"{prompt}")
        logger.info(f"{content_check} {reasoning_check}")
        probability = self.extract_marked_probability(content_check)
        # 兴趣系数修正 无关激活效率太高,暂时停用,待新记忆系统上线后调整
        probability += message_info.interested_rate * 0.25
        probability = min(1.0, probability)
        if probability <= 0.1:
            probability = min(0.03, probability)
        if probability >= 0.8:
            probability = max(probability, 0.90)

        # 当前表情包理解能力较差,少说就少错
        if message_info.is_emoji:
            probability *= global_config.emoji_response_penalty

        return probability

    @staticmethod
    def extract_marked_probability(text):
        """提取带标记的概率值 该方法主要用于测试微调prompt阶段"""
        text = text.strip()
        pattern = r"##PROBABILITY_START##(.*?)##PROBABILITY_END##"
        match = re.search(pattern, text, re.DOTALL)
        if match:
            prob_str = match.group(1).strip()
            # 处理百分比(65% → 0.65)
            if "%" in prob_str:
                return float(prob_str.replace("%", "")) / 100
            # 处理分数(2/3 → 0.666...)
            elif "/" in prob_str:
                numerator, denominator = map(float, prob_str.split("/"))
                return numerator / denominator
            # 直接处理小数
            else:
                return float(prob_str)

        percent_match = re.search(r"(\d{1,3})%", text)  # 65%
        decimal_match = re.search(r"(0\.\d+|1\.0+)", text)  # 0.65
        fraction_match = re.search(r"(\d+)/(\d+)", text)  # 2/3
        try:
            if percent_match:
                prob = float(percent_match.group(1)) / 100
            elif decimal_match:
                prob = float(decimal_match.group(0))
            elif fraction_match:
                numerator, denominator = map(float, fraction_match.groups())
                prob = numerator / denominator
            else:
                return 0  # 无匹配格式

            # 验证范围是否合法
            if 0 <= prob <= 1:
                return prob
            return 0
        except (ValueError, ZeroDivisionError):
            return 0

    @llmcheck_decorator(is_continuous_chat)
    def get_reply_probability(self, message_id):
        return super().get_reply_probability(message_id)
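# --- Illustrative sketch (not part of this commit) ----------------------------
# What extract_marked_probability above accepts, shown on plain strings: a
# ##PROBABILITY_START##...##PROBABILITY_END## block wins, otherwise the first
# percentage or decimal found in the text is used, and anything unparseable or
# outside [0, 1] falls back to 0. parse() is a trimmed stand-in for the static
# method, not the method itself.
import re

def parse(text: str) -> float:
    m = re.search(r"##PROBABILITY_START##(.*?)##PROBABILITY_END##", text, re.DOTALL)
    raw = m.group(1).strip() if m else text
    percent = re.search(r"(\d{1,3})%", raw)
    decimal = re.search(r"(0\.\d+|1\.0+)", raw)
    if percent:
        prob = float(percent.group(1)) / 100
    elif decimal:
        prob = float(decimal.group(0))
    else:
        return 0.0
    return prob if 0 <= prob <= 1 else 0.0

print(parse("##PROBABILITY_START##0.85##PROBABILITY_END##"))  # 0.85
print(parse("我认为概率大约是 65%"))                              # 0.65
print(parse("不确定"))                                          # 0.0
# ------------------------------------------------------------------------------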
@@ -10,6 +10,7 @@ Mxp 模式:梦溪畔独家赞助
4.限制同时思考的消息数量,防止喷射
5.拥有单聊增益,无论在群里还是私聊,只要bot一直和你聊,就会增加意愿值
6.意愿分为衰减意愿+临时意愿
7.疲劳机制

如果你发现本模式出现了bug
上上策是询问智慧的小草神()
@@ -34,26 +35,50 @@ class MxpWillingManager(BaseWillingManager):
        self.chat_new_message_time: Dict[str, list[float]] = {}  # 聊天流ID: 消息时间
        self.last_response_person: Dict[str, tuple[str, int]] = {}  # 上次回复的用户信息
        self.temporary_willing: float = 0  # 临时意愿值
        self.chat_bot_message_time: Dict[str, list[float]] = {}  # 聊天流ID: bot已回复消息时间
        self.chat_fatigue_punishment_list: Dict[
            str, list[tuple[float, float]]
        ] = {}  # 聊天流疲劳惩罚列, 聊天流ID: 惩罚时间列(开始时间,持续时间)
        self.chat_fatigue_willing_attenuation: Dict[str, float] = {}  # 聊天流疲劳意愿衰减值

        # 可变参数
        self.intention_decay_rate = 0.93  # 意愿衰减率
        self.message_expiration_time = 120  # 消息过期时间(秒)
        self.number_of_message_storage = 10  # 消息存储数量

        self.number_of_message_storage = 12  # 消息存储数量
        self.expected_replies_per_min = 3  # 每分钟预期回复数
        self.basic_maximum_willing = 0.5  # 基础最大意愿值

        self.mention_willing_gain = 0.6  # 提及意愿增益
        self.interest_willing_gain = 0.3  # 兴趣意愿增益
        self.emoji_response_penalty = self.global_config.emoji_response_penalty  # 表情包回复惩罚
        self.down_frequency_rate = self.global_config.down_frequency_rate  # 降低回复频率的群组惩罚系数
        self.single_chat_gain = 0.12  # 单聊增益

        self.fatigue_messages_triggered_num = self.expected_replies_per_min  # 疲劳消息触发数量(int)
        self.fatigue_coefficient = 1.0  # 疲劳系数

        self.is_debug = False  # 是否开启调试模式

    async def async_task_starter(self) -> None:
        """异步任务启动器"""
        asyncio.create_task(self._return_to_basic_willing())
        asyncio.create_task(self._chat_new_message_to_change_basic_willing())
        asyncio.create_task(self._fatigue_attenuation())

    async def before_generate_reply_handle(self, message_id: str):
        """回复前处理"""
        pass
        current_time = time.time()
        async with self.lock:
            w_info = self.ongoing_messages[message_id]
            if w_info.chat_id not in self.chat_bot_message_time:
                self.chat_bot_message_time[w_info.chat_id] = []
            self.chat_bot_message_time[w_info.chat_id] = [
                t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60
            ]
            self.chat_bot_message_time[w_info.chat_id].append(current_time)
            if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
                time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
                self.chat_fatigue_punishment_list[w_info.chat_id].append([current_time, time_interval * 2])

    async def after_generate_reply_handle(self, message_id: str):
        """回复后处理"""
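# --- Illustrative sketch (not part of this commit) ----------------------------
# The fatigue trigger in before_generate_reply_handle above, reduced to plain
# numbers: once the bot has sent fatigue_messages_triggered_num replies inside
# a 60 s window, the remaining part of that window (60 minus the age of the
# oldest reply) is doubled and used as the length of a fatigue-punishment
# period. The timestamps below are made up.
import time

fatigue_messages_triggered_num = 3
current_time = time.time()
bot_message_times = [current_time - 50, current_time - 20, current_time]  # 3 replies within 60 s

if len(bot_message_times) == fatigue_messages_triggered_num:
    time_interval = 60 - (current_time - bot_message_times.pop(0))  # 60 - 50 = 10 s left in the window
    punishment = (current_time, time_interval * 2)                  # punish for roughly 20 s
    print(round(punishment[1]))  # 20
# ------------------------------------------------------------------------------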
@@ -63,9 +88,9 @@ class MxpWillingManager(BaseWillingManager):
            rel_level = self._get_relationship_level_num(rel_value)
            self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += rel_level * 0.05

            now_chat_new_person = self.last_response_person.get(w_info.chat_id, ["", 0])
            now_chat_new_person = self.last_response_person.get(w_info.chat_id, [w_info.person_id, 0])
            if now_chat_new_person[0] == w_info.person_id:
                if now_chat_new_person[1] < 2:
                if now_chat_new_person[1] < 3:
                    now_chat_new_person[1] += 1
            else:
                self.last_response_person[w_info.chat_id] = [w_info.person_id, 0]
@@ -75,13 +100,14 @@ class MxpWillingManager(BaseWillingManager):
        async with self.lock:
            w_info = self.ongoing_messages[message_id]
            if w_info.is_mentioned_bot:
                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += 0.2
                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.mention_willing_gain / 2.5
            if (
                w_info.chat_id in self.last_response_person
                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
                and self.last_response_person[w_info.chat_id][1]
            ):
                self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.single_chat_gain * (
                    2 * self.last_response_person[w_info.chat_id][1] + 1
                    2 * self.last_response_person[w_info.chat_id][1] - 1
                )
            now_chat_new_person = self.last_response_person.get(w_info.chat_id, ["", 0])
            if now_chat_new_person[0] != w_info.person_id:
@@ -92,35 +118,63 @@ class MxpWillingManager(BaseWillingManager):
        async with self.lock:
            w_info = self.ongoing_messages[message_id]
            current_willing = self.chat_person_reply_willing[w_info.chat_id][w_info.person_id]
            if self.is_debug:
                self.logger.debug(f"基础意愿值:{current_willing}")

            if w_info.is_mentioned_bot:
                current_willing += self.mention_willing_gain / (int(current_willing) + 1)
                current_willing_ = self.mention_willing_gain / (int(current_willing) + 1)
                current_willing += current_willing_
                if self.is_debug:
                    self.logger.debug(f"提及增益:{current_willing_}")

            if w_info.interested_rate > 0:
                current_willing += math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
                if self.is_debug:
                    self.logger.debug(
                        f"兴趣增益:{math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain}"
                    )

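Both gains applied above are bounded by construction: the mention gain is divided by int(current_willing) + 1, so it shrinks in integer steps as willing climbs, and the interest gain is squashed through atan, so it can never exceed interest_willing_gain. A quick check with assumed gain values (not the project defaults):

import math

mention_willing_gain = 0.6   # assumed value for illustration
interest_willing_gain = 0.3  # assumed value for illustration

for current_willing in (0.4, 1.2, 2.7):
    print(current_willing, mention_willing_gain / (int(current_willing) + 1))
# 0.4 -> 0.6, 1.2 -> 0.3, 2.7 -> 0.2: the same mention adds less the keener the bot already is

interested_rate = 3.0        # assumed
print(math.atan(interested_rate / 2) / math.pi * 2 * interest_willing_gain)  # ~0.188, always below 0.3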
            self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing

            rel_value = await w_info.person_info_manager.get_value(w_info.person_id, "relationship_value")
            rel_level = self._get_relationship_level_num(rel_value)
            current_willing += rel_level * 0.1
            if self.is_debug and rel_level != 0:
                self.logger.debug(f"关系增益:{rel_level * 0.1}")

            if (
                w_info.chat_id in self.last_response_person
                and self.last_response_person[w_info.chat_id][0] == w_info.person_id
                and self.last_response_person[w_info.chat_id][1]
            ):
                current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
                if self.is_debug:
                    self.logger.debug(
                        f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}"
                    )

            current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
            if self.is_debug:
                self.logger.debug(f"疲劳衰减:{self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)}")

            chat_ongoing_messages = [msg for msg in self.ongoing_messages.values() if msg.chat_id == w_info.chat_id]
            chat_person_ogoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
            if len(chat_person_ogoing_messages) >= 2:
                current_willing = 0
                if self.is_debug:
                    self.logger.debug("进行中消息惩罚:归0")
            elif len(chat_ongoing_messages) == 2:
                current_willing -= 0.5
                if self.is_debug:
                    self.logger.debug("进行中消息惩罚:-0.5")
            elif len(chat_ongoing_messages) == 3:
                current_willing -= 1.5
                if self.is_debug:
                    self.logger.debug("进行中消息惩罚:-1.5")
            elif len(chat_ongoing_messages) >= 4:
                current_willing = 0
                if self.is_debug:
                    self.logger.debug("进行中消息惩罚:归0")

            probability = self._willing_to_probability(current_willing)

@@ -168,32 +222,52 @@ class MxpWillingManager(BaseWillingManager):
                self.ongoing_messages[message.message_info.message_id].person_id, self.chat_reply_willing[chat.stream_id]
            )

            current_time = time.time()
            if chat.stream_id not in self.chat_new_message_time:
                self.chat_new_message_time[chat.stream_id] = []
            self.chat_new_message_time[chat.stream_id].append(time.time())
            self.chat_new_message_time[chat.stream_id].append(current_time)
            if len(self.chat_new_message_time[chat.stream_id]) > self.number_of_message_storage:
                self.chat_new_message_time[chat.stream_id].pop(0)

            if chat.stream_id not in self.chat_fatigue_punishment_list:
                self.chat_fatigue_punishment_list[chat.stream_id] = [
                    (
                        current_time,
                        self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60,
                    )
                ]
                self.chat_fatigue_willing_attenuation[chat.stream_id] = (
                    -2 * self.basic_maximum_willing * self.fatigue_coefficient
                )

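For a chat stream seen for the first time, the hunk above seeds one fatigue entry whose duration is the same expression later used as the message-expiration window, and pins the attenuation at -2 * basic_maximum_willing * fatigue_coefficient until it decays. A worked duration with assumed parameter values (illustrative only):

number_of_message_storage = 10   # assumed
basic_maximum_willing = 0.5      # assumed
expected_replies_per_min = 2     # assumed

initial_fatigue_s = number_of_message_storage * basic_maximum_willing / expected_replies_per_min * 60
print(initial_fatigue_s)  # 150.0 seconds of start-up fatigue for the new stream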
    def _willing_to_probability(self, willing: float) -> float:
        """Convert a willing value into a reply probability."""
        willing = max(0, willing)
        if willing < 2:
            probability = math.atan(willing * 2) / math.pi * 2
        else:
        elif willing < 2.5:
            probability = math.atan(willing * 4) / math.pi * 2
        else:
            probability = 1
        return probability

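For reference, the new piecewise mapping rises smoothly toward 1: a willing of 0.5 already corresponds to a 50% reply probability, the second atan branch covers 2 ≤ willing < 2.5, and anything at or above 2.5 is a guaranteed reply. A short sketch reproducing the mapping (rounded values):

import math

def willing_to_probability(willing: float) -> float:
    # Same piecewise mapping as the new version of _willing_to_probability above.
    willing = max(0, willing)
    if willing < 2:
        return math.atan(willing * 2) / math.pi * 2
    elif willing < 2.5:
        return math.atan(willing * 4) / math.pi * 2
    return 1.0

for w in (0.5, 1.0, 2.0, 2.5):
    print(w, round(willing_to_probability(w), 3))
# 0.5 -> 0.5, 1.0 -> 0.705, 2.0 -> 0.921, 2.5 -> 1.0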
    async def _chat_new_message_to_change_basic_willing(self):
        """Adjust a chat stream's basic willing as new messages arrive."""
        while True:
        update_time = 20
        while True:
            await asyncio.sleep(update_time)
            async with self.lock:
                for chat_id, message_times in self.chat_new_message_time.items():
                    # Clean up expired messages
                    current_time = time.time()
                    message_times = [
                        msg_time for msg_time in message_times if current_time - msg_time < self.message_expiration_time
                        msg_time
                        for msg_time in message_times
                        if current_time - msg_time
                        < self.number_of_message_storage
                        * self.basic_maximum_willing
                        / self.expected_replies_per_min
                        * 60
                    ]
                    self.chat_new_message_time[chat_id] = message_times

@@ -202,38 +276,14 @@ class MxpWillingManager(BaseWillingManager):
                        update_time = 20
                    elif len(message_times) == self.number_of_message_storage:
                        time_interval = current_time - message_times[0]
                        basic_willing = self.basic_maximum_willing * math.sqrt(
                            time_interval / self.message_expiration_time
                        )
                        basic_willing = self._basic_willing_culculate(time_interval)
                        self.chat_reply_willing[chat_id] = basic_willing
                        update_time = 17 * math.sqrt(time_interval / self.message_expiration_time) + 3
                        update_time = 17 * basic_willing / self.basic_maximum_willing + 3
                    else:
                        self.logger.debug(f"聊天流{chat_id}消息时间数量异常,数量:{len(message_times)}")
                        self.chat_reply_willing[chat_id] = 0

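The recomputed polling interval now tracks the willing itself: 17 * basic_willing / basic_maximum_willing + 3 runs from 3 s when the basic willing is near zero (a busy stream with tightly packed messages) up to 20 s at the maximum willing, matching the fixed 20 s used on the other branches. Sample points, assuming basic_maximum_willing = 0.5:

basic_maximum_willing = 0.5  # assumed value for illustration
for basic_willing in (0.0, 0.25, 0.5):
    print(basic_willing, 17 * basic_willing / basic_maximum_willing + 3)
# 0.0 -> 3.0 s, 0.25 -> 11.5 s, 0.5 -> 20.0 s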
    async def get_variable_parameters(self) -> Dict[str, str]:
        """Get the tunable parameters."""
        return {
            "intention_decay_rate": "意愿衰减率",
            "message_expiration_time": "消息过期时间(秒)",
            "number_of_message_storage": "消息存储数量",
            "basic_maximum_willing": "基础最大意愿值",
            "mention_willing_gain": "提及意愿增益",
            "interest_willing_gain": "兴趣意愿增益",
            "emoji_response_penalty": "表情包回复惩罚",
            "down_frequency_rate": "降低回复频率的群组惩罚系数",
            "single_chat_gain": "单聊增益(不仅是私聊)",
        }

    async def set_variable_parameters(self, parameters: Dict[str, any]):
        """Set the tunable parameters."""
        async with self.lock:
            for key, value in parameters.items():
                if hasattr(self, key):
                    setattr(self, key, value)
                    self.logger.debug(f"参数 {key} 已更新为 {value}")
                else:
                    self.logger.debug(f"尝试设置未知参数 {key}")
                if self.is_debug:
                    self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")

    def _get_relationship_level_num(self, relationship_value) -> int:
        """Relationship level calculation."""
@@ -253,5 +303,27 @@ class MxpWillingManager(BaseWillingManager):
            level_num = 5 if relationship_value > 1000 else 0
        return level_num - 2

    def _basic_willing_culculate(self, t: float) -> float:
        """Basic willing calculation."""
        return math.tan(t * self.expected_replies_per_min * math.pi / 120 / self.number_of_message_storage) / 2

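The replacement curve grows almost linearly while the stored messages are tightly packed and steepens as their time span t widens. A rough feel for it, under assumed parameter values (not necessarily the project defaults):

import math

expected_replies_per_min = 2     # assumed
number_of_message_storage = 10   # assumed

def basic_willing(t: float) -> float:
    # Same curve as _basic_willing_culculate above.
    return math.tan(t * expected_replies_per_min * math.pi / 120 / number_of_message_storage) / 2

for t in (30, 75, 150):
    print(t, round(basic_willing(t), 3))
# 30 s -> 0.079, 75 s -> 0.207, 150 s -> 0.5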
    async def _fatigue_attenuation(self):
        """Fatigue attenuation."""
        while True:
            await asyncio.sleep(1)
            current_time = time.time()
            async with self.lock:
                for chat_id, fatigue_list in self.chat_fatigue_punishment_list.items():
                    fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
                    self.chat_fatigue_willing_attenuation[chat_id] = 0
                    for start_time, duration in fatigue_list:
                        self.chat_fatigue_willing_attenuation[chat_id] += (
                            self.chat_reply_willing[chat_id]
                            * 2
                            / math.pi
                            * math.asin(2 * (current_time - start_time) / duration - 1)
                            - self.chat_reply_willing[chat_id]
                        ) * self.fatigue_coefficient

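Each active punishment therefore pulls the stream's willing down by 2 × chat_reply_willing × fatigue_coefficient the instant it starts, and the asin ramp releases that penalty non-linearly until the entry expires. A small sketch of one entry's contribution over its lifetime, with assumed values:

import math

willing = 0.5              # assumed chat_reply_willing for the stream
fatigue_coefficient = 1.0  # assumed

def attenuation(elapsed: float, duration: float) -> float:
    # Mirrors the asin ramp inside _fatigue_attenuation above.
    return (willing * 2 / math.pi * math.asin(2 * elapsed / duration - 1) - willing) * fatigue_coefficient

for frac in (0.0, 0.5, 1.0):
    print(frac, round(attenuation(frac * 120, 120), 3))
# start -> -1.0 (i.e. -2 * willing), halfway -> -0.5, end -> 0.0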
    async def get_willing(self, chat_id):
        return self.temporary_willing

@@ -1,6 +1,6 @@
from src.common.logger import LogConfig, WILLING_STYLE_CONFIG, LoguruLogger, get_module_logger
from dataclasses import dataclass
from ..config.config import global_config, BotConfig
from ...config.config import global_config, BotConfig
from ..chat.chat_stream import ChatStream, GroupInfo
from ..chat.message import MessageRecv
from ..person_info.person_info import person_info_manager, PersonInfoManager
@@ -18,8 +18,8 @@ after_generate_reply_handle handling after the reply is generated, once a reply has been decided
not_reply_handle handling after deciding not to reply
get_reply_probability get the reply probability
bombing_buffer_message_handle handling after the buffer blows a message away
get_variable_parameters get the tunable parameter set; returns a dict whose keys are parameter names and values are parameter descriptions (this method prepares for splitting the global settings)
set_variable_parameters set the tunable parameter set; pass in a dict whose keys are parameter names and values are parameter values (this method prepares for splitting the global settings)
get_variable_parameters not decided yet
set_variable_parameters not decided yet
The following two methods can be adjusted to fit your implementation:
get_willing get the willing of a chat stream
set_willing set the willing of a chat stream
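Read together, this docstring is the contract a concrete willing manager fills in. A minimal sketch of such a subclass follows; the method names come from the docstring, but the signatures and the constant probability are assumptions and may need adapting to the real abstract base:

class ConstantWillingManager(BaseWillingManager):
    """Toy manager that replies with a fixed probability (illustration only)."""

    async def before_generate_reply_handle(self, message_id: str):
        pass  # nothing to prepare before generating a reply

    async def after_generate_reply_handle(self, message_id: str):
        pass  # nothing to update after the reply is generated

    async def not_reply_handle(self, message_id: str):
        pass  # nothing to do when the bot decides not to reply

    async def bombing_buffer_message_handle(self, message_id: str):
        pass  # nothing to do when the buffer drops a message

    async def get_reply_probability(self, message_id: str) -> float:
        return 0.3  # assumed constant; real managers derive this from willing values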
@@ -152,15 +152,15 @@ class BaseWillingManager(ABC):
        async with self.lock:
            self.chat_reply_willing[chat_id] = willing

    @abstractmethod
    async def get_variable_parameters(self) -> Dict[str, str]:
        """Abstract method: get the tunable parameters."""
        pass
    # @abstractmethod
    # async def get_variable_parameters(self) -> Dict[str, str]:
    #     """Abstract method: get the tunable parameters."""
    #     pass

    @abstractmethod
    async def set_variable_parameters(self, parameters: Dict[str, any]):
        """Abstract method: set the tunable parameters."""
        pass
    # @abstractmethod
    # async def set_variable_parameters(self, parameters: Dict[str, any]):
    #     """Abstract method: set the tunable parameters."""
    #     pass

def init_willing_manager() -> BaseWillingManager: