secret mysterious little feature

SengokuCola
2025-03-19 15:22:34 +08:00
parent b187c8a21b
commit 1076b509a3
4 changed files with 349 additions and 11 deletions

View File

@@ -0,0 +1,46 @@
{
  "final_scores": {
    "开放性": 5.5,
    "尽责性": 5.0,
    "外向性": 6.0,
    "宜人性": 1.5,
    "神经质": 6.0
  },
  "scenarios": [
    {
      "场景": "在团队项目中,你发现一个同事的工作质量明显低于预期,这可能会影响整个项目的进度。",
      "评估维度": [
        "尽责性",
        "宜人性"
      ]
    },
    {
      "场景": "你被邀请参加一个完全陌生的社交活动,现场都是不认识的人。",
      "评估维度": [
        "外向性",
        "神经质"
      ]
    },
    {
      "场景": "你的朋友向你推荐了一个新的艺术展览,但风格与你平时接触的完全不同。",
      "评估维度": [
        "开放性",
        "外向性"
      ]
    },
    {
      "场景": "在工作中,你遇到了一个技术难题,需要学习全新的技术栈。",
      "评估维度": [
        "开放性",
        "尽责性"
      ]
    },
    {
      "场景": "你的朋友因为个人原因情绪低落,向你寻求帮助。",
      "评估维度": [
        "宜人性",
        "神经质"
      ]
    }
  ]
}
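
For reference, a minimal sketch (not part of this commit) of reading the saved evaluation back; it assumes the file sits at results/personality_result.json, which is where the evaluator script added below writes it:

import json

# Load the saved Big Five evaluation and print the per-trait scores.
with open("results/personality_result.json", "r", encoding="utf-8") as f:
    result = json.load(f)

for trait, score in result["final_scores"].items():
    print(f"{trait}: {score}/10")  # keys are the Chinese trait names used throughout this commit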

View File

@@ -27,17 +27,6 @@ class PromptBuilder:
                            message_txt: str,
                            sender_name: str = "某人",
                            stream_id: Optional[int] = None) -> tuple[str, str]:
-        """Build the prompt.
-        Args:
-            message_txt: the message text
-            sender_name: the sender's nickname
-            # relationship_value: relationship value
-            group_id: group ID
-        Returns:
-            str: the constructed prompt
-        """
        # Relationships: load the relationships of some speakers in the current chat history
        who_chat_in_group = [chat_stream]
        who_chat_in_group += get_recent_group_speaker(

View File

@@ -0,0 +1,128 @@
import asyncio
import os
import time
from typing import Tuple

import aiohttp
import requests

from src.common.logger import get_module_logger

logger = get_module_logger("offline_llm")


class LLMModel:
    def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
        self.api_key = os.getenv("SILICONFLOW_KEY")
        self.base_url = os.getenv("SILICONFLOW_BASE_URL")
        if not self.api_key or not self.base_url:
            raise ValueError("环境变量未正确加载SILICONFLOW_KEY 或 SILICONFLOW_BASE_URL 未设置")
        logger.info(f"API URL: {self.base_url}")  # log the configured base_url

    def generate_response(self, prompt: str) -> Tuple[str, str]:
        """Generate the model's response for the given prompt; returns (content, reasoning_content)."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params
        }
        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # log the request URL

        max_retries = 3
        base_wait_time = 15  # base wait time in seconds
        for retry in range(max_retries):
            try:
                response = requests.post(api_url, headers=headers, json=data)
                if response.status_code == 429:
                    wait_time = base_wait_time * (2 ** retry)  # exponential backoff
                    logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                    time.sleep(wait_time)
                    continue
                response.raise_for_status()  # raise on other error statuses
                result = response.json()
                if "choices" in result and len(result["choices"]) > 0:
                    content = result["choices"][0]["message"]["content"]
                    reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                    return content, reasoning_content
                return "没有返回结果", ""
            except Exception as e:
                if retry < max_retries - 1:  # retries remaining
                    wait_time = base_wait_time * (2 ** retry)
                    logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                    time.sleep(wait_time)
                else:
                    logger.error(f"请求失败: {str(e)}")
                    return f"请求失败: {str(e)}", ""
        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

    async def generate_response_async(self, prompt: str) -> Tuple[str, str]:
        """Asynchronously generate the model's response for the given prompt; returns (content, reasoning_content)."""
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }
        # Build the request body
        data = {
            "model": self.model_name,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.5,
            **self.params
        }
        # Send the request to the full chat/completions endpoint
        api_url = f"{self.base_url.rstrip('/')}/chat/completions"
        logger.info(f"Request URL: {api_url}")  # log the request URL

        max_retries = 3
        base_wait_time = 15
        async with aiohttp.ClientSession() as session:
            for retry in range(max_retries):
                try:
                    async with session.post(api_url, headers=headers, json=data) as response:
                        if response.status == 429:
                            wait_time = base_wait_time * (2 ** retry)  # exponential backoff
                            logger.warning(f"遇到请求限制(429),等待{wait_time}秒后重试...")
                            await asyncio.sleep(wait_time)
                            continue
                        response.raise_for_status()  # raise on other error statuses
                        result = await response.json()
                        if "choices" in result and len(result["choices"]) > 0:
                            content = result["choices"][0]["message"]["content"]
                            reasoning_content = result["choices"][0]["message"].get("reasoning_content", "")
                            return content, reasoning_content
                        return "没有返回结果", ""
                except Exception as e:
                    if retry < max_retries - 1:  # retries remaining
                        wait_time = base_wait_time * (2 ** retry)
                        logger.error(f"[回复]请求失败,等待{wait_time}秒后重试... 错误: {str(e)}")
                        await asyncio.sleep(wait_time)
                    else:
                        logger.error(f"请求失败: {str(e)}")
                        return f"请求失败: {str(e)}", ""
        logger.error("达到最大重试次数,请求仍然失败")
        return "达到最大重试次数,请求仍然失败", ""

View File

@@ -0,0 +1,175 @@
from typing import Dict, List
import json
import os
import random
import sys
from pathlib import Path

from dotenv import load_dotenv

current_dir = Path(__file__).resolve().parent
# Project root is three directory levels up
project_root = current_dir.parent.parent.parent
# Path to the .env.prod file
env_path = project_root / ".env.prod"

# Make the project root importable
sys.path.append(str(project_root))

from src.plugins.personality.offline_llm import LLMModel

# Load environment variables
if env_path.exists():
    print(f"{env_path} 加载环境变量")
    load_dotenv(env_path)
else:
    print(f"未找到环境变量文件: {env_path}")
    print("将使用默认配置")


class PersonalityEvaluator:
    def __init__(self):
        self.personality_traits = {
            "开放性": 0,
            "尽责性": 0,
            "外向性": 0,
            "宜人性": 0,
            "神经质": 0
        }
        self.scenarios = [
            {
                "场景": "在团队项目中,你发现一个同事的工作质量明显低于预期,这可能会影响整个项目的进度。",
                "评估维度": ["尽责性", "宜人性"]
            },
            {
                "场景": "你被邀请参加一个完全陌生的社交活动,现场都是不认识的人。",
                "评估维度": ["外向性", "神经质"]
            },
            {
                "场景": "你的朋友向你推荐了一个新的艺术展览,但风格与你平时接触的完全不同。",
                "评估维度": ["开放性", "外向性"]
            },
            {
                "场景": "在工作中,你遇到了一个技术难题,需要学习全新的技术栈。",
                "评估维度": ["开放性", "尽责性"]
            },
            {
                "场景": "你的朋友因为个人原因情绪低落,向你寻求帮助。",
                "评估维度": ["宜人性", "神经质"]
            }
        ]
        self.llm = LLMModel()

    def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
        """Use DeepSeek AI to score the user's reaction to a scenario on the requested Big Five dimensions."""
        prompt = f"""请根据以下场景和用户描述评估用户在大五人格模型中的相关维度得分0-10分
场景:{scenario}
用户描述:{response}
需要评估的维度:{', '.join(dimensions)}
请按照以下格式输出评估结果仅输出JSON格式
{{
"维度1": 分数,
"维度2": 分数
}}
评估标准:
- 开放性:对新事物的接受程度和创造性思维
- 尽责性:计划性、组织性和责任感
- 外向性:社交倾向和能量水平
- 宜人性:同理心、合作性和友善程度
- 神经质:情绪稳定性和压力应对能力
请确保分数在0-10之间并给出合理的评估理由。"""
        try:
            ai_response, _ = self.llm.generate_response(prompt)
            # Try to extract the JSON portion of the model's reply
            start_idx = ai_response.find('{')
            end_idx = ai_response.rfind('}') + 1
            if start_idx != -1 and end_idx != 0:
                json_str = ai_response[start_idx:end_idx]
                scores = json.loads(json_str)
                # Clamp every score to the 0-10 range
                return {k: max(0, min(10, float(v))) for k, v in scores.items()}
            else:
                print("AI响应格式不正确使用默认评分")
                return {dim: 5.0 for dim in dimensions}
        except Exception as e:
            print(f"评估过程出错:{str(e)}")
            return {dim: 5.0 for dim in dimensions}


def main():
    print("欢迎使用人格形象创建程序!")
    print("接下来,您将面对一系列场景。请根据您想要创建的角色形象,描述在该场景下可能的反应。")
    print("每个场景都会评估不同的人格维度,最终得出完整的人格特征评估。")
    print("\n准备好了吗?按回车键开始...")
    input()

    evaluator = PersonalityEvaluator()
    final_scores = {
        "开放性": 0,
        "尽责性": 0,
        "外向性": 0,
        "宜人性": 0,
        "神经质": 0
    }
    dimension_counts = {trait: 0 for trait in final_scores.keys()}

    for i, scenario_data in enumerate(evaluator.scenarios, 1):
        print(f"\n场景 {i}/{len(evaluator.scenarios)}:")
        print("-" * 50)
        print(scenario_data["场景"])
        print("\n请描述您的角色在这种情况下会如何反应:")
        response = input().strip()
        if not response:
            print("反应描述不能为空!")
            continue

        print("\n正在评估您的描述...")
        scores = evaluator.evaluate_response(scenario_data["场景"], response, scenario_data["评估维度"])

        # Accumulate the final scores
        for dimension, score in scores.items():
            final_scores[dimension] += score
            dimension_counts[dimension] += 1

        print("\n当前评估结果:")
        print("-" * 30)
        for dimension, score in scores.items():
            print(f"{dimension}: {score}/10")

        if i < len(evaluator.scenarios):
            print("\n按回车键继续下一个场景...")
            input()

    # Average each dimension over the number of scenarios that touched it
    for dimension in final_scores:
        if dimension_counts[dimension] > 0:
            final_scores[dimension] = round(final_scores[dimension] / dimension_counts[dimension], 2)

    print("\n最终人格特征评估结果:")
    print("-" * 30)
    for trait, score in final_scores.items():
        print(f"{trait}: {score}/10")

    # Save the results
    result = {
        "final_scores": final_scores,
        "scenarios": evaluator.scenarios
    }
    # Make sure the output directory exists
    os.makedirs("results", exist_ok=True)
    # Write to file
    with open("results/personality_result.json", "w", encoding="utf-8") as f:
        json.dump(result, f, ensure_ascii=False, indent=2)
    print("\n结果已保存到 results/personality_result.json")


if __name__ == "__main__":
    main()
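
The script above is interactive; as an illustration (not part of the commit), the evaluator can also be driven programmatically. The answer string and the scores shown in the comment are made up for the example:

# Assumes PersonalityEvaluator has been imported from this module (its exact path is not shown in the diff).
evaluator = PersonalityEvaluator()

scenario = evaluator.scenarios[0]
answer = "我会先私下和同事沟通,了解原因,然后一起制定改进计划。"
scores = evaluator.evaluate_response(scenario["场景"], answer, scenario["评估维度"])
print(scores)  # e.g. {"尽责性": 7.5, "宜人性": 8.0}; keys mirror the requested dimensions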