This commit is contained in:
SengokuCola
2025-06-06 11:36:55 +08:00
parent 3c779f9340
commit 3c955c8a34
13 changed files with 5 additions and 75 deletions

View File

@@ -4,8 +4,6 @@ import time
import re
from datetime import datetime
from typing import Dict, List, Any
import pandas as pd
from pathlib import Path
import sqlite3
def clean_group_name(name: str) -> str:
@@ -194,7 +192,7 @@ def analyze_expressions():
f.write(f"详细报告: {group_file}\n")
f.write("-" * 40 + "\n\n")
print(f"分析报告已生成:")
print("分析报告已生成:")
print(f"总报告: {summary_file}")
print(f"人格表达报告: {personality_report}")
print(f"各群组详细报告位于: {output_dir}")

View File

@@ -4,7 +4,6 @@ import time
import traceback
from collections import deque
from typing import List, Optional, Dict, Any, Deque, Callable, Awaitable
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import chat_manager
from rich.traceback import install
from src.chat.utils.prompt_builder import global_prompt_manager
@@ -33,9 +32,7 @@ from src.chat.focus_chat.info_processors.self_processor import SelfProcessor
from src.chat.focus_chat.planners.planner_factory import PlannerFactory
from src.chat.focus_chat.planners.modify_actions import ActionModifier
from src.chat.focus_chat.planners.action_manager import ActionManager
from src.chat.focus_chat.working_memory.working_memory import WorkingMemory
from src.config.config import global_config
from src.common.database.database_model import ActionRecords
install(extra_lines=3)

View File

@@ -8,7 +8,6 @@ from src.common.logger_manager import get_logger
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.message_receive.chat_stream import chat_manager
from src.person_info.relationship_manager import relationship_manager
from .base_processor import BaseProcessor
from typing import List, Optional
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation

View File

@@ -7,7 +7,6 @@ from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
from src.person_info.relationship_manager import relationship_manager
from .base_processor import BaseProcessor
from typing import List, Optional, Dict
from src.chat.heart_flow.observation.observation import Observation

View File

@@ -363,14 +363,14 @@ class DefaultReplyer:
# 1. learnt_expressions加权随机选3条
if learnt_style_expressions:
weights = [expr["count"] for expr in learnt_style_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 3)
selected_learnt = weighted_sample_no_replacement(learnt_style_expressions, weights, 4)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
style_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")
# 2. learnt_grammar_expressions加权随机选3条
if learnt_grammar_expressions:
weights = [expr["count"] for expr in learnt_grammar_expressions]
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 3)
selected_learnt = weighted_sample_no_replacement(learnt_grammar_expressions, weights, 4)
for expr in selected_learnt:
if isinstance(expr, dict) and "situation" in expr and "style" in expr:
grammar_habbits.append(f"{expr['situation']}时,使用 {expr['style']}")

View File

@@ -4,7 +4,6 @@ from typing import Dict, Any, Optional, List
from src.common.logger_manager import get_logger
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.heart_flow.sub_heartflow import SubHeartflow, ChatState
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
# 初始化日志记录器

View File

@@ -1,4 +1,3 @@
import asyncio
from typing import Optional, Tuple, Dict
from src.common.logger_manager import get_logger
from src.chat.message_receive.chat_stream import chat_manager

View File

@@ -24,7 +24,6 @@ from src.chat.normal_chat.normal_chat_planner import NormalChatPlanner
from src.chat.normal_chat.normal_chat_action_modifier import NormalChatActionModifier
from src.chat.normal_chat.normal_chat_expressor import NormalChatExpressor
from src.chat.focus_chat.replyer.default_replyer import DefaultReplyer
from src.common.database.database_model import ActionRecords
logger = get_logger("normal_chat")

View File

@@ -1,10 +1,9 @@
from typing import List, Optional, Tuple, Union
from typing import List, Optional, Union
import random
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.chat.message_receive.message import MessageThinking
from src.chat.normal_chat.normal_prompt import prompt_builder
from src.chat.utils.utils import process_llm_response
from src.chat.utils.timer_calculator import Timer
from src.common.logger_manager import get_logger
from src.chat.utils.info_catcher import info_catcher_manager
@@ -58,7 +57,7 @@ class NormalChatGenerator:
if model_response:
logger.debug(f"{global_config.bot.nickname}的原始回复是:{model_response}")
model_response = await self._process_response(model_response)
model_response = await self.process_llm_response(model_response)
return model_response
else:
@@ -167,15 +166,3 @@ class NormalChatGenerator:
except Exception as e:
logger.debug(f"获取情感标签时出错: {e}")
return "中立", "平静" # 出错时返回默认值
@staticmethod
async def _process_response(content: str) -> Tuple[List[str], List[str]]:
    """Post-process raw LLM output via ``process_llm_response``.

    Args:
        content: Raw response text from the model; may be empty/None.

    Returns:
        The value produced by ``process_llm_response(content)``.

    NOTE(review): the annotation claims ``Tuple[List[str], List[str]]`` and
    the empty-input branch returns ``(None, [])``, but the normal path
    returns ``process_llm_response``'s single value and the caller assigns
    it directly — the tuple shape looks wrong; confirm against
    ``process_llm_response`` before relying on it.
    """
    # Guard: nothing to process for falsy input (None or empty string).
    if not content:
        return None, []
    processed_response = process_llm_response(content)
    return processed_response

View File

@@ -1,5 +1,4 @@
import json
from re import A
from typing import Dict, Any
from rich.traceback import install
from src.llm_models.utils_model import LLMRequest

View File

@@ -5,7 +5,6 @@ from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.person_info.relationship_manager import relationship_manager
import time
from typing import Optional
from src.chat.utils.utils import get_recent_group_speaker
from src.manager.mood_manager import mood_manager
from src.chat.memory_system.Hippocampus import HippocampusManager

View File

@@ -1,6 +1,5 @@
import random
from reportportal_client import current
from src.common.logger_manager import get_logger
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config

View File

@@ -1,44 +0,0 @@
import asyncio
import os

import pytest
from dotenv import load_dotenv

from src.llm_models.utils_model import LLMRequest

# Load environment variables (provider credentials etc.) from a local .env file.
load_dotenv()


@pytest.mark.asyncio
async def test_model_request():
    """Smoke-test ``LLMRequest.generate_response_async`` end to end.

    NOTE(review): this is an integration test — it performs a real network
    call to the configured provider and needs credentials in the environment.
    It only checks that a non-empty string response comes back.
    """
    # Model configuration used for the test run.
    model_config = {
        "name": "deepseek-v3",       # test model
        "provider": "CHATANY",       # test provider
        "temp": 0.3,
        "enable_thinking": False,
    }

    llm = LLMRequest(model=model_config)

    # Test prompt (kept verbatim — it is runtime data sent to the model).
    test_prompt = "你好,请做个自我介绍"

    try:
        # Exercise the async generation path; the second element is a
        # (reasoning, model_name) pair per the call site's unpacking.
        content, (reasoning_content, model_name) = await llm.generate_response_async(test_prompt)

        # Surface the results for manual inspection when run directly.
        print(f"\n模型名称: {model_name}")
        print(f"回复内容: {content}")
        print(f"推理内容: {reasoning_content}")

        # Basic sanity assertions on the response.
        assert content is not None, "回复内容不应为空"
        assert isinstance(content, str), "回复内容应为字符串"
    except Exception as e:
        pytest.fail(f"测试失败: {str(e)}")


if __name__ == "__main__":
    # Allow running the test as a plain script, outside pytest.
    asyncio.run(test_model_request())