Merge branch 'dev' into fix-kaomoji-missing-bug
.github/workflows/ruff.yml
@@ -12,7 +12,10 @@ jobs:
      with:
        fetch-depth: 0
        ref: ${{ github.head_ref || github.ref_name }}
-     - uses: astral-sh/ruff-action@v3
+     - name: Install the latest version of ruff
+       uses: astral-sh/ruff-action@v3
+       with:
+         version: "latest"
      - run: ruff check --fix
      - run: ruff format
      - name: Commit changes

.gitignore
@@ -29,7 +29,6 @@ config/bot_config_dev.toml
config/bot_config.toml
config/bot_config.toml.bak
src/plugins/remote/client_uuid.json
run_none.bat
(测试版)麦麦生成人格.bat
(临时版)麦麦开始学习.bat
src/plugins/utils/statistic.py

requirements.txt
Binary file not shown.
@@ -1,4 +1,4 @@
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.plugins.chat.chat_stream import ChatStream
from src.common.database import db
@@ -18,7 +18,7 @@ logger = get_module_logger("tool_use", config=tool_use_config)

class ToolUser:
    def __init__(self):
-        self.llm_model_tool = LLM_request(
+        self.llm_model_tool = LLMRequest(
            model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
        )

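Note: the LLM_request → LLMRequest rename repeats across nearly every hunk below. As a minimal migration sketch (constructor arguments copied from this hunk; nothing beyond the new class name is assumed):

    from src.plugins.models.utils_model import LLMRequest
    from src.plugins.config.config import global_config

    # Same call site as before the rename; keyword arguments are unchanged.
    llm_model_tool = LLMRequest(
        model=global_config.llm_tool_use, temperature=0.2, max_tokens=1000, request_type="tool_use"
    )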
@@ -41,6 +41,12 @@ class ToolUser:
        else:
            mid_memory_info = ""

+        stream_id = chat_stream.stream_id
+        chat_talking_prompt = ""
+        if stream_id:
+            chat_talking_prompt = get_recent_group_detailed_plain_text(
+                stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+            )
        new_messages = list(
            db.messages.find({"chat_id": chat_stream.stream_id, "time": {"$gt": time.time()}}).sort("time", 1).limit(15)
        )
@@ -54,9 +60,10 @@ class ToolUser:
        prompt = ""
        prompt += mid_memory_info
        prompt += "你正在思考如何回复群里的消息。\n"
        prompt += f"之前群里进行了如下讨论:\n"
        prompt += chat_talking_prompt
        prompt += f"你注意到{sender_name}刚刚说:{message_txt}\n"
-        prompt += f"注意你就是{bot_name},{bot_name}指的就是你。"
+
+        prompt += f"注意你就是{bot_name},{bot_name}是你的名字。根据之前的聊天记录补充问题信息,搜索时避开你的名字。\n"
        prompt += "你现在需要对群里的聊天内容进行回复,现在选择工具来对消息和你的回复进行处理,你是否需要额外的信息,比如回忆或者搜寻已有的知识,改变关系和情感,或者了解你现在正在做什么。"
        return prompt

@@ -107,7 +114,7 @@ class ToolUser:
        return None

    async def use_tool(
-        self, message_txt: str, sender_name: str, chat_stream: ChatStream, subheartflow: SubHeartflow = None
+        self, message_txt: str, sender_name: str, chat_stream: ChatStream, sub_heartflow: SubHeartflow = None
    ):
        """使用工具辅助思考,判断是否需要额外信息

@@ -115,13 +122,14 @@ class ToolUser:
            message_txt: 用户消息文本
            sender_name: 发送者名称
            chat_stream: 聊天流对象
+            sub_heartflow: 子心流对象(可选)

        Returns:
            dict: 工具使用结果,包含结构化的信息
        """
        try:
            # 构建提示词
-            prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, subheartflow)
+            prompt = await self._build_tool_prompt(message_txt, sender_name, chat_stream, sub_heartflow)

            # 定义可用工具
            tools = self._define_tools()

@@ -1,7 +1,7 @@
from .sub_heartflow import SubHeartflow
from .observation import ChattingObservation
from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.plugins.schedule.schedule_generator import bot_schedule
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
@@ -60,7 +60,7 @@ class Heartflow:
        self.current_mind = "你什么也没想"
        self.past_mind = []
        self.current_state: CurrentState = CurrentState()
-        self.llm_model = LLM_request(
+        self.llm_model = LLMRequest(
            model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
        )

@@ -1,7 +1,7 @@
# 定义了来自外部世界的信息
# 外部世界可以是某个聊天 不同平台的聊天 也可以是任意媒体
from datetime import datetime
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.common.database import db
from src.common.logger import get_module_logger
@@ -40,7 +40,7 @@ class ChattingObservation(Observation):

        self.updating_old = False

-        self.llm_summary = LLM_request(
+        self.llm_summary = LLMRequest(
            model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
        )

@@ -57,8 +57,9 @@ class ChattingObservation(Observation):
                msg_str = ""
                for msg in mid_memory_by_id["messages"]:
                    msg_str += f"{msg['detailed_plain_text']}"
-                time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
-                mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
+                # time_diff = int((datetime.now().timestamp() - mid_memory_by_id["created_at"]) / 60)
+                # mid_memory_str += f"距离现在{time_diff}分钟前:\n{msg_str}\n"
+                mid_memory_str += f"{msg_str}\n"
            except Exception as e:
                logger.error(f"获取mid_memory_id失败: {e}")
                traceback.print_exc()

@@ -1,10 +1,11 @@
from .observation import Observation, ChattingObservation
import asyncio
from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
import re
import time
from src.plugins.chat.message import UserInfo
+from src.plugins.chat.utils import parse_text_timestamps

# from src.plugins.schedule.schedule_generator import bot_schedule
# from src.plugins.memory_system.Hippocampus import HippocampusManager
@@ -37,11 +38,11 @@ def init_prompt():
    prompt += "{prompt_personality}\n"
    prompt += "刚刚你的想法是{current_thinking_info}。可以适当转换话题\n"
    prompt += "-----------------------------------\n"
-    prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+    prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
    prompt += "你现在{mood_info}\n"
    prompt += "你注意到{sender_name}刚刚说:{message_txt}\n"
    prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白"
-    prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话\n"
+    prompt += "思考时可以想想如何对群聊内容进行回复。回复的要求是:平淡一些,简短一些,说中文,尽量不要说你说过的话。如果你要回复,最好只回复一个人的一个话题\n"
    prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
    prompt += "记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{bot_name},{bot_name}指的就是你。"
    Prompt(prompt, "sub_heartflow_prompt_before")
@@ -49,7 +50,7 @@ def init_prompt():
    # prompt += f"你现在正在做的事情是:{schedule_info}\n"
    prompt += "{extra_info}\n"
    prompt += "{prompt_personality}\n"
-    prompt += "现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{chat_observe_info}\n"
+    prompt += "现在是{time_now},你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:\n{chat_observe_info}\n"
    prompt += "刚刚你的想法是{current_thinking_info}。"
    prompt += "你现在看到了网友们发的新消息:{message_new_info}\n"
    prompt += "你刚刚回复了群友们:{reply_info}"
@@ -78,7 +79,7 @@ class SubHeartflow:
        self.current_mind = ""
        self.past_mind = []
        self.current_state: CurrentState = CurrentState()
-        self.llm_model = LLM_request(
+        self.llm_model = LLMRequest(
            model=global_config.llm_sub_heartflow,
            temperature=global_config.llm_sub_heartflow["temp"],
            max_tokens=600,
@@ -154,7 +155,7 @@ class SubHeartflow:
            await observation.observe()

    async def do_thinking_before_reply(
-        self, message_txt: str, sender_name: str, chat_stream: ChatStream, extra_info: str, obs_id: int = None
+        self, message_txt: str, sender_info: UserInfo, chat_stream: ChatStream, extra_info: str, obs_id: int = None
    ):
        current_thinking_info = self.current_mind
        mood_info = self.current_state.mood
@@ -207,7 +208,11 @@ class SubHeartflow:
        # f"根据你和说话者{sender_name}的关系和态度进行回复,明确你的立场和情感。"
        # )
        relation_prompt_all = (await global_prompt_manager.get_prompt_async("relationship_prompt")).format(
-            relation_prompt, sender_name
+            relation_prompt, sender_info.user_nickname
        )

+        sender_name_sign = (
+            f"<{chat_stream.platform}:{sender_info.user_id}:{sender_info.user_nickname}:{sender_info.user_cardname}>"
+        )
+
        # prompt = ""
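Note: the `<platform:user_id:nickname:cardname>` sign built here is a machine-readable placeholder; a later pass (convert_all_person_sign_to_person_name, visible in the hunks below) swaps it back to a display name. An invented example of the format, for orientation only:

    sender_name_sign = "<qq:12345:Alice:Ally>"  # hypothetical values, not from this diff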
@@ -226,19 +231,25 @@ class SubHeartflow:
        # prompt += "请注意不要输出多余内容(包括前后缀,冒号和引号,括号, 表情,等),不要带有括号和动作描写"
        # prompt += f"记得结合上述的消息,生成内心想法,文字不要浮夸,注意你就是{self.bot_name},{self.bot_name}指的就是你。"

+        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
        prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_before")).format(
            extra_info_prompt,
            # prompt_schedule,
            relation_prompt_all,
            prompt_personality,
            current_thinking_info,
+            time_now,
            chat_observe_info,
            mood_info,
-            sender_name,
+            sender_name_sign,
            message_txt,
            self.bot_name,
        )

+        prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+        prompt = parse_text_timestamps(prompt, mode="lite")
+
        try:
            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
        except Exception as e:
@@ -286,9 +297,12 @@ class SubHeartflow:
        message_new_info = chat_talking_prompt
        reply_info = reply_content

+        time_now = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+
        prompt = (await global_prompt_manager.get_prompt_async("sub_heartflow_prompt_after")).format(
            extra_info_prompt,
            prompt_personality,
+            time_now,
            chat_observe_info,
            current_thinking_info,
            message_new_info,
@@ -296,6 +310,9 @@ class SubHeartflow:
            mood_info,
        )

+        prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+        prompt = parse_text_timestamps(prompt, mode="lite")
+
        try:
            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
        except Exception as e:
@@ -308,48 +325,6 @@ class SubHeartflow:

        self.last_reply_time = time.time()

-    async def judge_willing(self):
-        # 开始构建prompt
-        prompt_personality = "你"
-        # person
-        individuality = Individuality.get_instance()
-
-        personality_core = individuality.personality.personality_core
-        prompt_personality += personality_core
-
-        personality_sides = individuality.personality.personality_sides
-        random.shuffle(personality_sides)
-        prompt_personality += f",{personality_sides[0]}"
-
-        identity_detail = individuality.identity.identity_detail
-        random.shuffle(identity_detail)
-        prompt_personality += f",{identity_detail[0]}"
-
-        # print("麦麦闹情绪了1")
-        current_thinking_info = self.current_mind
-        mood_info = self.current_state.mood
-        # print("麦麦闹情绪了2")
-        prompt = ""
-        prompt += f"{prompt_personality}\n"
-        prompt += "现在你正在上网,和qq群里的网友们聊天"
-        prompt += f"你现在的想法是{current_thinking_info}。"
-        prompt += f"你现在{mood_info}。"
-        prompt += "现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
-        prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
-        try:
-            response, reasoning_content = await self.llm_model.generate_response_async(prompt)
-            # 解析willing值
-            willing_match = re.search(r"<(\d+)>", response)
-        except Exception as e:
-            logger.error(f"意愿判断获取失败: {e}")
-            willing_match = None
-        if willing_match:
-            self.current_state.willing = int(willing_match.group(1))
-        else:
-            self.current_state.willing = 0
-
-        return self.current_state.willing
-
    def update_current_mind(self, response):
        self.past_mind.append(self.current_mind)
        self.current_mind = response

@@ -10,7 +10,7 @@ from src.common.logger import get_module_logger
logger = get_module_logger("offline_llm")


-class LLM_request_off:
+class LLMRequestOff:
    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs

@@ -19,7 +19,7 @@ with open(config_path, "r", encoding="utf-8") as f:
# 现在可以导入src模块
from src.individuality.scene import get_scene_by_factor, PERSONALITY_SCENES  # noqa E402
from src.individuality.questionnaire import FACTOR_DESCRIPTIONS  # noqa E402
-from src.individuality.offline_llm import LLM_request_off  # noqa E402
+from src.individuality.offline_llm import LLMRequestOff  # noqa E402

# 加载环境变量
env_path = os.path.join(root_path, ".env")
@@ -65,7 +65,7 @@ def adapt_scene(scene: str) -> str:
    现在,请你给出改编后的场景描述
    """

-    llm = LLM_request_off(model_name=config["model"]["llm_normal"]["name"])
+    llm = LLMRequestOff(model_name=config["model"]["llm_normal"]["name"])
    adapted_scene, _ = llm.generate_response(prompt)

    # 检查返回的场景是否为空或错误信息
@@ -79,7 +79,7 @@ def adapt_scene(scene: str) -> str:
        return scene


-class PersonalityEvaluator_direct:
+class PersonalityEvaluatorDirect:
    def __init__(self):
        self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
        self.scenarios = []
@@ -110,7 +110,7 @@ class PersonalityEvaluator_direct:
                {"场景": scene["scenario"], "评估维度": [trait, secondary_trait], "场景编号": scene_key}
            )

-        self.llm = LLM_request_off()
+        self.llm = LLMRequestOff()

    def evaluate_response(self, scenario: str, response: str, dimensions: List[str]) -> Dict[str, float]:
        """
@@ -269,7 +269,7 @@ class PersonalityEvaluator_direct:


def main():
-    evaluator = PersonalityEvaluator_direct()
+    evaluator = PersonalityEvaluatorDirect()
    result = evaluator.run_evaluation()

    # 准备简化的结果数据

@@ -9,7 +9,7 @@ from .plugins.willing.willing_manager import willing_manager
from .plugins.chat.chat_stream import chat_manager
from .heart_flow.heartflow import heartflow
from .plugins.memory_system.Hippocampus import HippocampusManager
-from .plugins.chat.message_sender import message_manager
+from .plugins.chat.messagesender import message_manager
from .plugins.storage.storage import MessageStorage
from .plugins.config.config import global_config
from .plugins.chat.bot import chat_bot

@@ -1,6 +1,6 @@
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from .pfc_utils import get_items_from_json
@@ -23,7 +23,7 @@ class ActionPlanner:
    """行动规划器"""

    def __init__(self, stream_id: str):
-        self.llm = LLM_request(
+        self.llm = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=1000,
@@ -4,7 +4,7 @@ from ..chat.chat_stream import ChatStream
from ..chat.message import Message
from ..message.message_base import Seg
from src.plugins.chat.message import MessageSending, MessageSet
-from src.plugins.chat.message_sender import message_manager
+from src.plugins.chat.messagesender import message_manager

logger = get_module_logger("message_sender")

@@ -50,21 +50,18 @@ class MessageStorage(ABC):
class MongoDBMessageStorage(MessageStorage):
    """MongoDB消息存储实现"""

-    def __init__(self):
-        self.db = db
-
    async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]:
        query = {"chat_id": chat_id}
        # print(f"storage_check_message: {message_time}")

        query["time"] = {"$gt": message_time}

-        return list(self.db.messages.find(query).sort("time", 1))
+        return list(db.messages.find(query).sort("time", 1))

    async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
        query = {"chat_id": chat_id, "time": {"$lt": time_point}}

-        messages = list(self.db.messages.find(query).sort("time", -1).limit(limit))
+        messages = list(db.messages.find(query).sort("time", -1).limit(limit))

        # 将消息按时间正序排列
        messages.reverse()
@@ -73,7 +70,7 @@ class MongoDBMessageStorage(MessageStorage):
    async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
        query = {"chat_id": chat_id, "time": {"$gt": after_time}}

-        return self.db.messages.find_one(query) is not None
+        return db.messages.find_one(query) is not None


# # 创建一个内存消息存储实现,用于测试

@@ -120,6 +120,10 @@ class ObservationInfo:
    # #spec
    # meta_plan_trigger: bool = False

+    def __init__(self):
+        self.last_message_id = None
+        self.chat_observer = None
+
    def __post_init__(self):
        """初始化后创建handler"""
        self.chat_observer = None
@@ -129,7 +133,7 @@ class ObservationInfo:
        """绑定到指定的chat_observer

        Args:
-            stream_id: 聊天流ID
+            chat_observer: 要绑定的ChatObserver实例
        """
        self.chat_observer = chat_observer
        self.chat_observer.notification_manager.register_handler(
@@ -171,7 +175,8 @@ class ObservationInfo:
            self.last_bot_speak_time = message["time"]
        else:
            self.last_user_speak_time = message["time"]
-            self.active_users.add(user_info.user_id)
+            if user_info.user_id is not None:
+                self.active_users.add(str(user_info.user_id))

        self.new_messages_count += 1
        self.unprocessed_messages.append(message)
@@ -227,7 +232,7 @@ class ObservationInfo:
        """清空未处理消息列表"""
        # 将未处理消息添加到历史记录中
        for message in self.unprocessed_messages:
-            self.chat_history.append(message)
+            self.chat_history.append(message)  # TODO NEED FIX TYPE???
        # 清空未处理消息列表
        self.has_unread_messages = False
        self.unprocessed_messages.clear()

@@ -8,7 +8,7 @@ from src.common.logger import get_module_logger
from ..chat.chat_stream import ChatStream
from ..message.message_base import UserInfo, Seg
from ..chat.message import Message
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from src.plugins.chat.message import MessageSending
from ..message.api import global_api
@@ -30,7 +30,7 @@ class GoalAnalyzer:
    """对话目标分析器"""

    def __init__(self, stream_id: str):
-        self.llm = LLM_request(
+        self.llm = LLMRequest(
            model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
        )

@@ -350,7 +350,7 @@ class DirectMessageSender:
        # logger.info(f"发送消息到{end_point}")
        # logger.info(message_json)
        try:
-            await global_api.send_message_REST(end_point, message_json)
+            await global_api.send_message_rest(end_point, message_json)
        except Exception as e:
            logger.error(f"REST方式发送失败,出现错误: {str(e)}")
            logger.info("尝试使用ws发送")

@@ -1,7 +1,7 @@
from typing import List, Tuple
from src.common.logger import get_module_logger
from src.plugins.memory_system.Hippocampus import HippocampusManager
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from ..chat.message import Message

@@ -12,7 +12,7 @@ class KnowledgeFetcher:
    """知识调取器"""

    def __init__(self):
-        self.llm = LLM_request(
+        self.llm = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=1000,
@@ -2,7 +2,7 @@ import json
import datetime
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from ..message.message_base import UserInfo
@@ -14,7 +14,7 @@ class ReplyChecker:
    """回复检查器"""

    def __init__(self, stream_id: str):
-        self.llm = LLM_request(
+        self.llm = LLMRequest(
            model=global_config.llm_normal, temperature=0.7, max_tokens=1000, request_type="reply_check"
        )
        self.name = global_config.BOT_NICKNAME

@@ -1,6 +1,6 @@
from typing import Tuple
from src.common.logger import get_module_logger
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..config.config import global_config
from .chat_observer import ChatObserver
from .reply_checker import ReplyChecker
@@ -15,7 +15,7 @@ class ReplyGenerator:
    """回复生成器"""

    def __init__(self, stream_id: str):
-        self.llm = LLM_request(
+        self.llm = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=300,
@@ -1,7 +1,7 @@
from .emoji_manager import emoji_manager
from ..person_info.relationship_manager import relationship_manager
from .chat_stream import chat_manager
-from .message_sender import message_manager
+from .messagesender import message_manager
from ..storage.storage import MessageStorage


@@ -42,7 +42,7 @@ class ChatBot:

        self._started = True

-    async def _create_PFC_chat(self, message: MessageRecv):
+    async def _create_pfc_chat(self, message: MessageRecv):
        try:
            chat_id = str(message.chat_stream.stream_id)

@@ -112,7 +112,7 @@ class ChatBot:
                )
                message.update_chat_stream(chat)
                await self.only_process_chat.process_message(message)
-                await self._create_PFC_chat(message)
+                await self._create_pfc_chat(message)
            else:
                if groupinfo.group_id in global_config.talk_allowed_groups:
                    # logger.debug(f"开始群聊模式{str(message_data)[:50]}...")

@@ -13,7 +13,7 @@ from ...common.database import db
from ..config.config import global_config
from ..chat.utils import get_embedding
from ..chat.utils_image import ImageManager, image_path_to_base64
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from src.common.logger import get_module_logger

logger = get_module_logger("emoji")
@@ -34,8 +34,8 @@ class EmojiManager:

    def __init__(self):
        self._scan_task = None
-        self.vlm = LLM_request(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
-        self.llm_emotion_judge = LLM_request(
+        self.vlm = LLMRequest(model=global_config.vlm, temperature=0.3, max_tokens=1000, request_type="emoji")
+        self.llm_emotion_judge = LLMRequest(
            model=global_config.llm_emotion_judge, max_tokens=600, temperature=0.8, request_type="emoji"
        )  # 更高的温度,更少的token(后续可以根据情绪来调整温度)

@@ -142,14 +142,16 @@ class MessageRecv(Message):

    def _generate_detailed_text(self) -> str:
        """生成详细文本,包含时间和用户信息"""
-        time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        time = self.message_info.time
        user_info = self.message_info.user_info
-        name = (
-            f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
-            if user_info.user_cardname != None
-            else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
-        )
-        return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+        # name = (
+        #     f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+        #     if user_info.user_cardname != None
+        #     else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+        # )
+        name = f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
+        return f"[{time}] {name}: {self.processed_plain_text}\n"


@dataclass
@@ -239,14 +241,16 @@ class MessageProcessBase(Message):

    def _generate_detailed_text(self) -> str:
        """生成详细文本,包含时间和用户信息"""
-        time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        # time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(self.message_info.time))
+        time = self.message_info.time
        user_info = self.message_info.user_info
-        name = (
-            f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
-            if user_info.user_cardname != None
-            else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
-        )
-        return f"[{time_str}] {name}: {self.processed_plain_text}\n"
+        # name = (
+        #     f"{user_info.user_nickname}(ta的昵称:{user_info.user_cardname},ta的id:{user_info.user_id})"
+        #     if user_info.user_cardname != None
+        #     else f"{user_info.user_nickname}(ta的id:{user_info.user_id})"
+        # )
+        name = f"<{self.message_info.platform}:{user_info.user_id}:{user_info.user_nickname}:{user_info.user_cardname}>"
+        return f"[{time}] {name}: {self.processed_plain_text}\n"


@dataclass

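Note: after these two parallel changes, _generate_detailed_text emits a raw timestamp and the sign format instead of a pre-formatted time and name. An invented example of the new output line:

    # "[1713500000.0] <qq:12345:Alice:Ally>: 早上好\n"

The raw `[timestamp]` prefix is what parse_text_timestamps (a new helper added further down in this merge) later rewrites into human-readable time.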
@@ -59,20 +59,20 @@ class MessageBuffer:
                logger.debug(f"被新消息覆盖信息id: {cache_msg.message.message_info.message_id}")

        # 查找最近的处理成功消息(T)
-        recent_F_count = 0
+        recent_f_count = 0
        for msg_id in reversed(self.buffer_pool[person_id_]):
            msg = self.buffer_pool[person_id_][msg_id]
            if msg.result == "T":
                break
            elif msg.result == "F":
-                recent_F_count += 1
+                recent_f_count += 1

        # 判断条件:最近T之后有超过3-5条F
-        if recent_F_count >= random.randint(3, 5):
+        if recent_f_count >= random.randint(3, 5):
            new_msg = CacheMessages(message=message, result="T")
            new_msg.cache_determination.set()
            self.buffer_pool[person_id_][message.message_info.message_id] = new_msg
-            logger.debug(f"快速处理消息(已堆积{recent_F_count}条F): {message.message_info.message_id}")
+            logger.debug(f"快速处理消息(已堆积{recent_f_count}条F): {message.message_info.message_id}")
            return

        # 添加新消息
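Note: the buffering rule above counts failed results ("F") accumulated since the most recent success ("T") and force-accepts the new message once 3-5 have piled up. A minimal sketch of that heuristic on plain data (the function name and list representation are invented for illustration; only the counting logic mirrors the hunk):

    import random

    def should_force_process(results: list[str]) -> bool:
        # Count "F" entries newer than the most recent "T".
        recent_f = 0
        for r in reversed(results):
            if r == "T":
                break
            if r == "F":
                recent_f += 1
        # Randomized threshold mirrors random.randint(3, 5) in the hunk above.
        return recent_f >= random.randint(3, 5)

    should_force_process(["T", "F", "F", "F", "F", "F"])  # True for any threshold in 3-5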
@@ -153,11 +153,11 @@ class MessageBuffer:
        # 更新当前消息的processed_plain_text
        if combined_text and combined_text[0] != message.processed_plain_text and is_update:
            if type == "text":
-                message.processed_plain_text = "".join(combined_text)
+                message.processed_plain_text = ",".join(combined_text)
                logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容到当前消息")
            elif type == "emoji":
                combined_text.pop()
-                message.processed_plain_text = "".join(combined_text)
+                message.processed_plain_text = ",".join(combined_text)
                message.is_emoji = False
                logger.debug(f"整合了{len(combined_text) - 1}条F消息的内容,覆盖当前emoji消息")

@@ -23,7 +23,7 @@ sender_config = LogConfig(
logger = get_module_logger("msg_sender", config=sender_config)


-class Message_Sender:
+class MessageSender:
    """发送器"""

    def __init__(self):
@@ -83,7 +83,7 @@ class Message_Sender:
        # logger.info(f"发送消息到{end_point}")
        # logger.info(message_json)
        try:
-            await global_api.send_message_REST(end_point, message_json)
+            await global_api.send_message_rest(end_point, message_json)
        except Exception as e:
            logger.error(f"REST方式发送失败,出现错误: {str(e)}")
            logger.info("尝试使用ws发送")
@@ -286,4 +286,4 @@ class MessageManager:
# 创建全局消息管理器实例
message_manager = MessageManager()
# 创建全局发送器实例
-message_sender = Message_Sender()
+message_sender = MessageSender()
@@ -8,7 +8,7 @@ import jieba
import numpy as np
from src.common.logger import get_module_logger

-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest
from ..utils.typo_generator import ChineseTypoGenerator
from ..config.config import global_config
from .message import MessageRecv, Message
@@ -38,21 +38,35 @@ def db_message_to_str(message_dict: Dict) -> str:
    return result


-def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
+def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
    """检查消息是否提到了机器人"""
    keywords = [global_config.BOT_NICKNAME]
    nicknames = global_config.BOT_ALIAS_NAMES
-    reply_probability = 0
+    reply_probability = 0.0
    is_at = False
    is_mentioned = False

+    if (
+        message.message_info.additional_config is not None
+        and message.message_info.additional_config.get("is_mentioned") is not None
+    ):
+        try:
+            reply_probability = float(message.message_info.additional_config.get("is_mentioned"))
+            is_mentioned = True
+            return is_mentioned, reply_probability
+        except Exception as e:
+            logger.warning(e)
+            logger.warning(
+                f"消息中包含不合理的设置 is_mentioned: {message.message_info.additional_config.get('is_mentioned')}"
+            )

    # 判断是否被@
    if re.search(f"@[\s\S]*?(id:{global_config.BOT_QQ})", message.processed_plain_text):
        is_at = True
        is_mentioned = True

    if is_at and global_config.at_bot_inevitable_reply:
-        reply_probability = 1
+        reply_probability = 1.0
        logger.info("被@,回复概率设置为100%")
    else:
        if not is_mentioned:
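Note: the return type change from bool to tuple[bool, float] means every caller must unpack two values. A hedged usage sketch — the probability check shown is an assumption about how callers consume the value, not code from this diff:

    is_mentioned, reply_probability = is_mentioned_bot_in_message(message)
    if random.random() < reply_probability:  # hypothetical caller-side gate
        ...  # proceed to generate a reply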
@@ -61,7 +75,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
            is_mentioned = True

    # 判断内容中是否被提及
-    message_content = re.sub(r"\@[\s\S]*?((\d+))", "", message.processed_plain_text)
+    message_content = re.sub(r"@[\s\S]*?((\d+))", "", message.processed_plain_text)
    message_content = re.sub(r"回复[\s\S]*?\((\d+)\)的消息,说: ", "", message_content)
    for keyword in keywords:
        if keyword in message_content:
@@ -70,14 +84,14 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> bool:
        if nickname in message_content:
            is_mentioned = True
    if is_mentioned and global_config.mentioned_bot_inevitable_reply:
-        reply_probability = 1
+        reply_probability = 1.0
        logger.info("被提及,回复概率设置为100%")
    return is_mentioned, reply_probability


async def get_embedding(text, request_type="embedding"):
    """获取文本的embedding向量"""
-    llm = LLM_request(model=global_config.embedding, request_type=request_type)
+    llm = LLMRequest(model=global_config.embedding, request_type=request_type)
    # return llm.get_embedding_sync(text)
    try:
        embedding = await llm.get_embedding(text)
@@ -91,7 +105,7 @@ async def get_recent_group_messages(chat_id: str, limit: int = 12) -> list:
    """从数据库获取群组最近的消息记录

    Args:
-        group_id: 群组ID
+        chat_id: 群组ID
        limit: 获取消息数量,默认12条

    Returns:
@@ -331,6 +345,7 @@ def process_llm_response(text: str) -> List[str]:
    pattern = re.compile(r"[\(\[].*?[\)\]]")
    # _extracted_contents = pattern.findall(text)
    _extracted_contents = pattern.findall(protected_text)  # 在保护后的文本上查找

    # 去除 () 和 [] 及其包裹的内容
    # cleaned_text = pattern.sub("", text)
    cleaned_text = pattern.sub("", protected_text)
@@ -493,16 +508,16 @@ def protect_kaomoji(sentence):
    """
    kaomoji_pattern = re.compile(
        r"("
-        r"[\(\[(【]"  # 左括号
+        r"[(\[(【]"  # 左括号
        r"[^()\[\]()【】]*?"  # 非括号字符(惰性匹配)
-        r"[^\u4e00-\u9fa5a-zA-Z0-9\s]"  # 非中文、非英文、非数字、非空格字符(必须包含至少一个)
+        r"[^一-龥a-zA-Z0-9\s]"  # 非中文、非英文、非数字、非空格字符(必须包含至少一个)
        r"[^()\[\]()【】]*?"  # 非括号字符(惰性匹配)
-        r"[\)\])】]"  # 右括号
+        r"[\)\])】"  # 右括号
+        r"]"
        r")"
        r"|"
-        r"("
-        r"[▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15}"
-        r")"
+        r"([▼▽・ᴥω・﹏^><≧≦ ̄`´∀ヮДд︿﹀へ。゚╥╯╰︶︹•⁄]{2,15"
+        r"}"
    )

    kaomoji_matches = kaomoji_pattern.findall(sentence)
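Note: this hunk is the heart of the branch's kaomoji fix: protect_kaomoji runs before the bracket-stripping pass in process_llm_response, so emoticons like (・ω・) survive cleanup. A sketch of the protect-then-restore idea under stated assumptions — the placeholder scheme and function names below are invented; only the intent matches the source:

    import re

    def protect(text: str, pattern: re.Pattern) -> tuple[str, dict]:
        # Swap each kaomoji for an opaque placeholder before brackets are stripped.
        mapping: dict[str, str] = {}

        def repl(m: re.Match) -> str:
            key = f"\x00K{len(mapping)}\x00"  # hypothetical placeholder format
            mapping[key] = m.group(0)
            return key

        return pattern.sub(repl, text), mapping

    def restore(text: str, mapping: dict) -> str:
        # Put the kaomoji back after the destructive cleanup pass.
        for key, kao in mapping.items():
            text = text.replace(key, kao)
        return text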
@@ -636,3 +651,142 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
    except Exception as e:
        logger.error(f"计算消息数量时出错: {str(e)}")
        return 0, 0
+
+
+def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
+    """将时间戳转换为人类可读的时间格式
+
+    Args:
+        timestamp: 时间戳
+        mode: 转换模式,"normal"为标准格式,"relative"为相对时间格式
+
+    Returns:
+        str: 格式化后的时间字符串
+    """
+    if mode == "normal":
+        return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
+    elif mode == "relative":
+        now = time.time()
+        diff = now - timestamp
+
+        if diff < 20:
+            return "刚刚:"
+        elif diff < 60:
+            return f"{int(diff)}秒前:"
+        elif diff < 1800:
+            return f"{int(diff / 60)}分钟前:"
+        elif diff < 3600:
+            return f"{int(diff / 60)}分钟前:\n"
+        elif diff < 86400:
+            return f"{int(diff / 3600)}小时前:\n"
+        elif diff < 604800:
+            return f"{int(diff / 86400)}天前:\n"
+        else:
+            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
+
+
+def parse_text_timestamps(text: str, mode: str = "normal") -> str:
+    """解析文本中的时间戳并转换为可读时间格式
+
+    Args:
+        text: 包含时间戳的文本,时间戳应以[]包裹
+        mode: 转换模式,传递给translate_timestamp_to_human_readable,"normal"或"relative"
+
+    Returns:
+        str: 替换后的文本
+
+    转换规则:
+    - normal模式: 将文本中所有时间戳转换为可读格式
+    - lite模式:
+        - 第一个和最后一个时间戳必须转换
+        - 以5秒为间隔划分时间段,每段最多转换一个时间戳
+        - 不转换的时间戳替换为空字符串
+    """
+    # 匹配[数字]或[数字.数字]格式的时间戳
+    pattern = r"\[(\d+(?:\.\d+)?)\]"
+
+    # 找出所有匹配的时间戳
+    matches = list(re.finditer(pattern, text))
+
+    if not matches:
+        return text
+
+    # normal模式: 直接转换所有时间戳
+    if mode == "normal":
+        result_text = text
+        for match in matches:
+            timestamp = float(match.group(1))
+            readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
+            # 由于替换会改变文本长度,需要使用正则替换而非直接替换
+            pattern_instance = re.escape(match.group(0))
+            result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+        return result_text
+    else:
+        # lite模式: 按5秒间隔划分并选择性转换
+        result_text = text
+
+        # 提取所有时间戳及其位置
+        timestamps = [(float(m.group(1)), m) for m in matches]
+        timestamps.sort(key=lambda x: x[0])  # 按时间戳升序排序
+
+        if not timestamps:
+            return text
+
+        # 获取第一个和最后一个时间戳
+        first_timestamp, first_match = timestamps[0]
+        last_timestamp, last_match = timestamps[-1]
+
+        # 将时间范围划分成5秒间隔的时间段
+        time_segments = {}
+
+        # 对所有时间戳按15秒间隔分组
+        for ts, match in timestamps:
+            segment_key = int(ts // 15)  # 将时间戳除以15取整,作为时间段的键
+            if segment_key not in time_segments:
+                time_segments[segment_key] = []
+            time_segments[segment_key].append((ts, match))
+
+        # 记录需要转换的时间戳
+        to_convert = []
+
+        # 从每个时间段中选择一个时间戳进行转换
+        for _, segment_timestamps in time_segments.items():
+            # 选择这个时间段中的第一个时间戳
+            to_convert.append(segment_timestamps[0])
+
+        # 确保第一个和最后一个时间戳在转换列表中
+        first_in_list = False
+        last_in_list = False
+
+        for ts, _ in to_convert:
+            if ts == first_timestamp:
+                first_in_list = True
+            if ts == last_timestamp:
+                last_in_list = True
+
+        if not first_in_list:
+            to_convert.append((first_timestamp, first_match))
+        if not last_in_list:
+            to_convert.append((last_timestamp, last_match))
+
+        # 创建需要转换的时间戳集合,用于快速查找
+        to_convert_set = {match.group(0) for _, match in to_convert}
+
+        # 首先替换所有不需要转换的时间戳为空字符串
+        for _, match in timestamps:
+            if match.group(0) not in to_convert_set:
+                pattern_instance = re.escape(match.group(0))
+                result_text = re.sub(pattern_instance, "", result_text, count=1)
+
+        # 按照时间戳原始顺序排序,避免替换时位置错误
+        to_convert.sort(key=lambda x: x[1].start())
+
+        # 执行替换
+        # 由于替换会改变文本长度,从后向前替换
+        to_convert.reverse()
+        for ts, match in to_convert:
+            readable_time = translate_timestamp_to_human_readable(ts, "relative")
+            pattern_instance = re.escape(match.group(0))
+            result_text = re.sub(pattern_instance, readable_time, result_text, count=1)
+
+        return result_text

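Note: a usage sketch for the two helpers added above (the timestamps are invented). One caveat for readers: the docstring says lite mode buckets by 5 seconds, but the code buckets with int(ts // 15), i.e. 15-second segments.

    import time

    now = time.time()
    text = f"[{now - 90}]早上好 [{now - 40}]在吗 [{now - 3}]你还在吗"
    print(parse_text_timestamps(text, mode="normal"))  # every [ts] -> absolute time
    print(parse_text_timestamps(text, mode="lite"))    # first/last kept, others thinned per segment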
@@ -9,7 +9,7 @@ import io

from ...common.database import db
from ..config.config import global_config
-from ..models.utils_model import LLM_request
+from ..models.utils_model import LLMRequest

from src.common.logger import get_module_logger

@@ -32,7 +32,7 @@ class ImageManager:
        self._ensure_description_collection()
        self._ensure_image_dir()
        self._initialized = True
-        self._llm = LLM_request(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")
+        self._llm = LLMRequest(model=global_config.vlm, temperature=0.4, max_tokens=300, request_type="image")

    def _ensure_image_dir(self):
        """确保图像存储目录存在"""

@@ -8,7 +8,7 @@ from ...config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .reasoning_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ...chat.message_sender import message_manager
+from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message
from ...chat.utils_image import image_path_to_base64

@@ -1,7 +1,7 @@
from typing import List, Optional, Tuple, Union
import random

-from ...models.utils_model import LLM_request
+from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ...chat.message import MessageThinking
from .reasoning_prompt_builder import prompt_builder
@@ -22,20 +22,20 @@ logger = get_module_logger("llm_generator", config=llm_config)

class ResponseGenerator:
    def __init__(self):
-        self.model_reasoning = LLM_request(
+        self.model_reasoning = LLMRequest(
            model=global_config.llm_reasoning,
            temperature=0.7,
            max_tokens=3000,
            request_type="response_reasoning",
        )
-        self.model_normal = LLM_request(
+        self.model_normal = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=256,
            request_type="response_reasoning",
        )

-        self.model_sum = LLM_request(
+        self.model_sum = LLMRequest(
            model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
        )
        self.current_model_type = "r1"  # 默认使用 R1
@@ -68,7 +68,7 @@ class ResponseGenerator:
            logger.info(f"{self.current_model_type}思考,失败")
            return None

-    async def _generate_response_with_model(self, message: MessageThinking, model: LLM_request, thinking_id: str):
+    async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
        sender_name = ""

        info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

@@ -8,7 +8,7 @@ from ...config.config import global_config
from ...chat.emoji_manager import emoji_manager
from .think_flow_generator import ResponseGenerator
from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ...chat.message_sender import message_manager
+from ...chat.messagesender import message_manager
from ...storage.storage import MessageStorage
from ...chat.utils import is_mentioned_bot_in_message, get_recent_group_detailed_plain_text
from ...chat.utils_image import image_path_to_base64
@@ -258,7 +258,7 @@ class ThinkFlowChat:
                await heartflow.get_subheartflow(chat.stream_id).do_observe()
            except Exception as e:
                logger.error(f"心流观察失败: {e}")
-                traceback.print_exc()
+                logger.error(traceback.format_exc())

            info_catcher.catch_after_observe(timing_results["观察"])

@@ -329,13 +329,17 @@ class ThinkFlowChat:
                    chat.stream_id
                ).do_thinking_before_reply(
                    message_txt=message.processed_plain_text,
-                    sender_name=message.message_info.user_info.user_nickname,
+                    sender_info=message.message_info.user_info,
                    chat_stream=chat,
                    obs_id=get_mid_memory_id,
                    extra_info=tool_result_info,
                )
            except Exception as e:
                logger.error(f"心流思考前脑内状态失败: {e}")
                logger.error(traceback.format_exc())
+                # 确保变量被定义,即使在错误情况下
+                current_mind = ""
+                past_mind = ""

            info_catcher.catch_afer_shf_step(timing_results["思考前脑内状态"], past_mind, current_mind)

@@ -373,6 +377,7 @@ class ThinkFlowChat:
            except Exception as e:
                logger.error(f"心流处理表情包失败: {e}")

+        # 思考后脑内状态更新
        try:
            with Timer("思考后脑内状态更新", timing_results):
                stream_id = message.chat_stream.stream_id
@@ -387,10 +392,44 @@ class ThinkFlowChat:
                )
        except Exception as e:
            logger.error(f"心流思考后脑内状态更新失败: {e}")
            logger.error(traceback.format_exc())

        # 回复后处理
        await willing_manager.after_generate_reply_handle(message.message_info.message_id)

+        # 处理认识关系
+        try:
+            is_known = await relationship_manager.is_known_some_one(
+                message.message_info.platform, message.message_info.user_info.user_id
+            )
+            if not is_known:
+                logger.info(f"首次认识用户: {message.message_info.user_info.user_nickname}")
+                await relationship_manager.first_knowing_some_one(
+                    message.message_info.platform,
+                    message.message_info.user_info.user_id,
+                    message.message_info.user_info.user_nickname,
+                    message.message_info.user_info.user_cardname
+                    or message.message_info.user_info.user_nickname,
+                    "",
+                )
+            else:
+                logger.debug(f"已认识用户: {message.message_info.user_info.user_nickname}")
+                if not await relationship_manager.is_qved_name(
+                    message.message_info.platform, message.message_info.user_info.user_id
+                ):
+                    logger.info(f"更新已认识但未取名的用户: {message.message_info.user_info.user_nickname}")
+                    await relationship_manager.first_knowing_some_one(
+                        message.message_info.platform,
+                        message.message_info.user_info.user_id,
+                        message.message_info.user_info.user_nickname,
+                        message.message_info.user_info.user_cardname
+                        or message.message_info.user_info.user_nickname,
+                        "",
+                    )
+        except Exception as e:
+            logger.error(f"处理认识关系失败: {e}")
+            logger.error(traceback.format_exc())

    except Exception as e:
        logger.error(f"心流处理消息失败: {e}")
        logger.error(traceback.format_exc())

@@ -2,7 +2,7 @@ from typing import List, Optional
import random


-from ...models.utils_model import LLM_request
+from ...models.utils_model import LLMRequest
from ...config.config import global_config
from ...chat.message import MessageRecv
from .think_flow_prompt_builder import prompt_builder
@@ -25,14 +25,14 @@ logger = get_module_logger("llm_generator", config=llm_config)

class ResponseGenerator:
    def __init__(self):
-        self.model_normal = LLM_request(
+        self.model_normal = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.llm_normal["temp"],
            max_tokens=256,
            request_type="response_heartflow",
        )

-        self.model_sum = LLM_request(
+        self.model_sum = LLMRequest(
            model=global_config.llm_summary_by_topic, temperature=0.6, max_tokens=2000, request_type="relation"
        )
        self.current_model_type = "r1"  # 默认使用 R1
@@ -94,21 +94,23 @@ class ResponseGenerator:
        return None

    async def _generate_response_with_model(
-        self, message: MessageRecv, model: LLM_request, thinking_id: str, mode: str = "normal"
+        self, message: MessageRecv, model: LLMRequest, thinking_id: str, mode: str = "normal"
    ) -> str:
        sender_name = ""

        info_catcher = info_catcher_manager.get_info_catcher(thinking_id)

-        if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
-            sender_name = (
-                f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
-                f"{message.chat_stream.user_info.user_cardname}"
-            )
-        elif message.chat_stream.user_info.user_nickname:
-            sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
-        else:
-            sender_name = f"用户({message.chat_stream.user_info.user_id})"
+        # if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
+        #     sender_name = (
+        #         f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
+        #         f"{message.chat_stream.user_info.user_cardname}"
+        #     )
+        # elif message.chat_stream.user_info.user_nickname:
+        #     sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+        # else:
+        #     sender_name = f"用户({message.chat_stream.user_info.user_id})"

+        sender_name = f"<{message.chat_stream.user_info.platform}:{message.chat_stream.user_info.user_id}:{message.chat_stream.user_info.user_nickname}:{message.chat_stream.user_info.user_cardname}>"

        # 构建prompt
        with Timer() as t_build_prompt:
@@ -119,14 +121,7 @@ class ResponseGenerator:
                sender_name=sender_name,
                stream_id=message.chat_stream.stream_id,
            )
-        elif mode == "simple":
-            prompt = await prompt_builder._build_prompt_simple(
-                message.chat_stream,
-                message_txt=message.processed_plain_text,
-                sender_name=sender_name,
-                stream_id=message.chat_stream.stream_id,
-            )
-        logger.info(f"构建{mode}prompt时间: {t_build_prompt.human_readable}")
+        logger.info(f"构建prompt时间: {t_build_prompt.human_readable}")

        try:
            content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
@@ -141,49 +136,6 @@ class ResponseGenerator:

        return content

-    async def _check_response_with_model(
-        self, message: MessageRecv, content: str, model: LLM_request, thinking_id: str
-    ) -> str:
-        _info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
-
-        sender_name = ""
-        if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
-            sender_name = (
-                f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]"
-                f"{message.chat_stream.user_info.user_cardname}"
-            )
-        elif message.chat_stream.user_info.user_nickname:
-            sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
-        else:
-            sender_name = f"用户({message.chat_stream.user_info.user_id})"
-
-        # 构建prompt
-        with Timer() as t_build_prompt_check:
-            prompt = await prompt_builder._build_prompt_check_response(
-                message.chat_stream,
-                message_txt=message.processed_plain_text,
-                sender_name=sender_name,
-                stream_id=message.chat_stream.stream_id,
-                content=content,
-            )
-        logger.info(f"构建check_prompt: {prompt}")
-        logger.info(f"构建check_prompt时间: {t_build_prompt_check.human_readable}")
-
-        try:
-            checked_content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
-
-            # info_catcher.catch_after_llm_generated(
-            #     prompt=prompt,
-            #     response=content,
-            #     reasoning_content=reasoning_content,
-            #     model_name=self.current_model_name)
-
-        except Exception:
-            logger.exception("检查回复时出错")
-            return None
-
-        return checked_content
-
    async def _get_emotion_tags(self, content: str, processed_plain_text: str):
        """提取情感标签,结合立场和情绪"""
        try:

@@ -8,6 +8,8 @@ from src.common.logger import get_module_logger
from ....individuality.individuality import Individuality
from src.heart_flow.heartflow import heartflow
from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
+from src.plugins.person_info.relationship_manager import relationship_manager
+from src.plugins.chat.utils import parse_text_timestamps

logger = get_module_logger("prompt")

@@ -161,6 +163,9 @@ class PromptBuilder:
            moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
        )

+        prompt = await relationship_manager.convert_all_person_sign_to_person_name(prompt)
+        prompt = parse_text_timestamps(prompt, mode="lite")
+
        return prompt

    async def _build_prompt_simple(

@@ -26,9 +26,9 @@ config_config = LogConfig(
logger = get_module_logger("config", config=config_config)

# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
-is_test = False
-mai_version_main = "0.6.2"
-mai_version_fix = ""
+is_test = True
+mai_version_main = "0.6.3"
+mai_version_fix = "snapshot-1"

if mai_version_fix:
    if is_test:
@@ -62,8 +62,7 @@ def update_config():
        shutil.copy2(template_path, old_config_path)
        logger.info(f"已创建新配置文件,请填写后重新运行: {old_config_path}")
        # 如果是新创建的配置文件,直接返回
-        quit()
-        return
+        return quit()

    # 读取旧配置文件和模板文件
    with open(old_config_path, "r", encoding="utf-8") as f:

@@ -9,7 +9,7 @@ import networkx as nx
import numpy as np
from collections import Counter
from ...common.database import db
-from ...plugins.models.utils_model import LLM_request
+from ...plugins.models.utils_model import LLMRequest
from src.common.logger import get_module_logger, LogConfig, MEMORY_STYLE_CONFIG
from src.plugins.memory_system.sample_distribution import MemoryBuildScheduler  # 分布生成器
from .memory_config import MemoryConfig
@@ -91,7 +91,7 @@ memory_config = LogConfig(
logger = get_module_logger("memory_system", config=memory_config)


-class Memory_graph:
+class MemoryGraph:
    def __init__(self):
        self.G = nx.Graph()  # 使用 networkx 的图结构

@@ -229,7 +229,7 @@ class Memory_graph:
# 海马体
class Hippocampus:
    def __init__(self):
-        self.memory_graph = Memory_graph()
+        self.memory_graph = MemoryGraph()
        self.llm_topic_judge = None
        self.llm_summary_by_topic = None
        self.entorhinal_cortex = None
@@ -243,8 +243,8 @@ class Hippocampus:
        self.parahippocampal_gyrus = ParahippocampalGyrus(self)
        # 从数据库加载记忆图
        self.entorhinal_cortex.sync_memory_from_db()
-        self.llm_topic_judge = LLM_request(self.config.llm_topic_judge, request_type="memory")
-        self.llm_summary_by_topic = LLM_request(self.config.llm_summary_by_topic, request_type="memory")
+        self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
+        self.llm_summary_by_topic = LLMRequest(self.config.llm_summary_by_topic, request_type="memory")

    def get_all_node_names(self) -> list:
        """获取记忆图中所有节点的名字列表"""
@@ -346,7 +346,8 @@ class Hippocampus:

        Args:
            text (str): 输入文本
-            num (int, optional): 需要返回的记忆数量。默认为5。
+            max_memory_num (int, optional): 记忆数量限制。默认为3。
+            max_memory_length (int, optional): 记忆长度限制。默认为2。
            max_depth (int, optional): 记忆检索深度。默认为2。
            fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
                如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
@@ -540,7 +541,6 @@ class Hippocampus:

        Args:
            text (str): 输入文本
-            num (int, optional): 需要返回的记忆数量。默认为5。
            max_depth (int, optional): 记忆检索深度。默认为2。
            fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
                如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
@@ -937,7 +937,7 @@ class EntorhinalCortex:
# 海马体
class Hippocampus:
    def __init__(self):
-        self.memory_graph = Memory_graph()
+        self.memory_graph = MemoryGraph()
        self.llm_topic_judge = None
        self.llm_summary_by_topic = None
        self.entorhinal_cortex = None
@@ -951,8 +951,8 @@ class Hippocampus:
        self.parahippocampal_gyrus = ParahippocampalGyrus(self)
        # 从数据库加载记忆图
        self.entorhinal_cortex.sync_memory_from_db()
-        self.llm_topic_judge = LLM_request(self.config.llm_topic_judge, request_type="memory")
-        self.llm_summary_by_topic = LLM_request(self.config.llm_summary_by_topic, request_type="memory")
+        self.llm_topic_judge = LLMRequest(self.config.llm_topic_judge, request_type="memory")
+        self.llm_summary_by_topic = LLMRequest(self.config.llm_summary_by_topic, request_type="memory")

    def get_all_node_names(self) -> list:
        """获取记忆图中所有节点的名字列表"""
@@ -1054,8 +1054,9 @@ class Hippocampus:

        Args:
            text (str): 输入文本
-            num (int, optional): 需要返回的记忆数量。默认为5。
-            max_depth (int, optional): 记忆检索深度。默认为2。
+            max_memory_num (int, optional): 返回的记忆条目数量上限。默认为3,表示最多返回3条与输入文本相关度最高的记忆。
+            max_memory_length (int, optional): 每个主题最多返回的记忆条目数量。默认为2,表示每个主题最多返回2条相似度最高的记忆。
+            max_depth (int, optional): 记忆检索深度。默认为3。值越大,检索范围越广,可以获取更多间接相关的记忆,但速度会变慢。
            fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
                如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。
                如果为False,使用LLM提取关键词,速度较慢但更准确。
@@ -1248,7 +1249,6 @@ class Hippocampus:

        Args:
            text (str): 输入文本
-            num (int, optional): 需要返回的记忆数量。默认为5。
            max_depth (int, optional): 记忆检索深度。默认为2。
            fast_retrieval (bool, optional): 是否使用快速检索。默认为False。
                如果为True,使用jieba分词和TF-IDF提取关键词,速度更快但可能不够准确。

@@ -177,7 +177,7 @@ def remove_mem_edge(hippocampus: Hippocampus):

# 修改节点信息
def alter_mem_node(hippocampus: Hippocampus):
-    batchEnviroment = dict()
+    batch_environment = dict()
    while True:
        concept = input("请输入节点概念名(输入'终止'以结束):\n")
        if concept.lower() == "终止":
@@ -229,7 +229,7 @@ def alter_mem_node(hippocampus: Hippocampus):
            break

        try:
-            user_exec(command, node_environment, batchEnviroment)
+            user_exec(command, node_environment, batch_environment)
        except Exception as e:
            console.print(e)
            console.print(
@@ -239,7 +239,7 @@ def alter_mem_node(hippocampus: Hippocampus):

# 修改边信息
def alter_mem_edge(hippocampus: Hippocampus):
-    batchEnviroment = dict()
+    batch_enviroment = dict()
    while True:
        source = input("请输入 **第一个节点** 名称(输入'终止'以结束):\n")
        if source.lower() == "终止":
@@ -262,21 +262,21 @@ def alter_mem_edge(hippocampus: Hippocampus):
    console.print("[yellow]你将获得一个执行任意代码的环境[/yellow]")
    console.print("[red]你已经被警告过了。[/red]\n")

-    edgeEnviroment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
+    edge_environment = {"source": "<节点名>", "target": "<节点名>", "strength": "<强度值,装在一个list里>"}
    console.print(
        "[green]环境变量中会有env与batchEnv两个dict, env在切换节点时会清空, batchEnv在操作终止时才会清空[/green]"
    )
    console.print(
-        f"[green] env 会被初始化为[/green]\n{edgeEnviroment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
+        f"[green] env 会被初始化为[/green]\n{edge_environment}\n[green]且会在用户代码执行完毕后被提交 [/green]"
    )
    console.print(
        "[yellow]为便于书写临时脚本,请手动在输入代码通过Ctrl+C等方式触发KeyboardInterrupt来结束代码执行[/yellow]"
    )

    # 拷贝数据以防操作炸了
-    edgeEnviroment["strength"] = [edge["strength"]]
-    edgeEnviroment["source"] = source
-    edgeEnviroment["target"] = target
+    edge_environment["strength"] = [edge["strength"]]
+    edge_environment["source"] = source
+    edge_environment["target"] = target

    while True:

@@ -288,8 +288,8 @@ def alter_mem_edge(hippocampus: Hippocampus):
|
||||
except KeyboardInterrupt:
|
||||
# 稍微防一下小天才
|
||||
try:
|
||||
if isinstance(edgeEnviroment["strength"][0], int):
|
||||
edge["strength"] = edgeEnviroment["strength"][0]
|
||||
if isinstance(edge_environment["strength"][0], int):
|
||||
edge["strength"] = edge_environment["strength"][0]
|
||||
else:
|
||||
raise Exception
|
||||
|
||||
@@ -301,7 +301,7 @@ def alter_mem_edge(hippocampus: Hippocampus):
|
||||
break
|
||||
|
||||
try:
|
||||
user_exec(command, edgeEnviroment, batchEnviroment)
|
||||
user_exec(command, edge_environment, batch_enviroment)
|
||||
except Exception as e:
|
||||
console.print(e)
|
||||
console.print(
|
||||
|
||||
@@ -10,7 +10,7 @@ from src.common.logger import get_module_logger
logger = get_module_logger("offline_llm")


class LLM_request_off:
class LLMRequestOff:
    def __init__(self, model_name="deepseek-ai/DeepSeek-V3", **kwargs):
        self.model_name = model_name
        self.params = kwargs
@@ -233,7 +233,8 @@ class MessageServer(BaseMessageHandler):
    async def send_message(self, message: MessageBase):
        await self.broadcast_to_platform(message.message_info.platform, message.to_dict())

    async def send_message_REST(self, url: str, data: Dict[str, Any]) -> Dict[str, Any]:
    @staticmethod
    async def send_message_rest(url: str, data: Dict[str, Any]) -> Dict[str, Any]:
        """Send a message to the given endpoint."""
        async with aiohttp.ClientSession() as session:
            try:
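# Illustrative sketch, not part of the diff: since send_message_rest is now a
# @staticmethod, it can be called without a MessageServer instance. The URL here
# is a hypothetical placeholder.
#
#     result = await MessageServer.send_message_rest(
#         "http://localhost:8080/api/message", message.to_dict()
#     )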
@@ -16,7 +16,7 @@ from ..config.config import global_config
logger = get_module_logger("model_utils")


class LLM_request:
class LLMRequest:
    # Models that need request transformation, kept as a class variable to avoid duplication
    MODELS_NEEDING_TRANSFORMATION = [
        "o3-mini",
@@ -6,6 +6,9 @@ from typing import Any, Callable, Dict
import datetime
import asyncio
import numpy as np
from src.plugins.models.utils_model import LLMRequest
from src.plugins.config.config import global_config
from src.individuality.individuality import Individuality

import matplotlib

@@ -13,6 +16,8 @@ matplotlib.use("Agg")
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import json
import re


"""
@@ -32,6 +37,8 @@ logger = get_module_logger("person_info")

person_info_default = {
    "person_id": None,
    "person_name": None,
    "name_reason": None,
    "platform": None,
    "user_id": None,
    "nickname": None,
@@ -48,16 +55,42 @@ person_info_default = {

class PersonInfoManager:
    def __init__(self):
        self.person_name_list = {}
        self.qv_name_llm = LLMRequest(
            model=global_config.llm_normal,
            max_tokens=256,
            request_type="qv_name",
        )
        if "person_info" not in db.list_collection_names():
            db.create_collection("person_info")
            db.person_info.create_index("person_id", unique=True)

        # Load every existing person_name at startup
        cursor = db.person_info.find({"person_name": {"$exists": True}}, {"person_id": 1, "person_name": 1, "_id": 0})
        for doc in cursor:
            if doc.get("person_name"):
                self.person_name_list[doc["person_id"]] = doc["person_name"]
        logger.debug(f"已加载 {len(self.person_name_list)} 个用户名称")

    def get_person_id(self, platform: str, user_id: int):
        """Derive a unique id."""
        # If the platform contains a "-", keep only the part after it
        if "-" in platform:
            platform = platform.split("-")[1]

        components = [platform, str(user_id)]
        key = "_".join(components)
        return hashlib.md5(key.encode()).hexdigest()
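# Editor's note, not part of the diff: the id is a deterministic MD5 over
# "<platform>_<user_id>", with any "xxx-" prefix stripped from the platform,
# so both calls below yield the same id, md5 of "qq_12345":
#
#     person_info_manager.get_person_id("qq", 12345)
#     person_info_manager.get_person_id("bot-qq", 12345)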
    def is_person_known(self, platform: str, user_id: int):
        """Check whether this person is known."""
        person_id = self.get_person_id(platform, user_id)
        document = db.person_info.find_one({"person_id": person_id})
        if document:
            return True
        else:
            return False
    async def create_person_info(self, person_id: str, data: dict = None):
        """Create a new entry."""
        if not person_id:

@@ -74,7 +107,7 @@ class PersonInfoManager:

        db.person_info.insert_one(_person_info_default)
    async def update_one_field(self, person_id: str, field_name: str, value, Data: dict = None):
    async def update_one_field(self, person_id: str, field_name: str, value, data: dict = None):
        """Update a single field, creating the document if it is missing."""
        if field_name not in person_info_default.keys():
            logger.debug(f"更新'{field_name}'失败,未定义的字段")

@@ -85,9 +118,112 @@ class PersonInfoManager:
        if document:
            db.person_info.update_one({"person_id": person_id}, {"$set": {field_name: value}})
        else:
            Data[field_name] = value
            data[field_name] = value
            logger.debug(f"更新时{person_id}不存在,已新建")
            await self.create_person_info(person_id, Data)
            await self.create_person_info(person_id, data)
    @staticmethod
    async def has_one_field(person_id: str, field_name: str):
        """Check whether a given field exists."""
        document = db.person_info.find_one({"person_id": person_id}, {field_name: 1})
        if document:
            return True
        else:
            return False
    def _extract_json_from_text(self, text: str) -> dict:
        """Fault-tolerant extraction of JSON data from text."""
        try:
            # Try parsing the whole text directly
            return json.loads(text)
        except json.JSONDecodeError:
            try:
                # Try to locate a JSON-shaped fragment
                json_pattern = r"\{[^{}]*\}"
                matches = re.findall(json_pattern, text)
                if matches:
                    return json.loads(matches[0])

                # If that also fails, fall back to extracting the key-value pairs
                nickname_pattern = r'"nickname"[:\s]+"([^"]+)"'
                reason_pattern = r'"reason"[:\s]+"([^"]+)"'

                nickname_match = re.search(nickname_pattern, text)
                reason_match = re.search(reason_pattern, text)

                if nickname_match:
                    return {
                        "nickname": nickname_match.group(1),
                        "reason": reason_match.group(1) if reason_match else "未提供理由",
                    }
            except Exception as e:
                logger.error(f"JSON提取失败: {str(e)}")

        # If every approach failed, return an empty result
        return {"nickname": "", "reason": ""}
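# Illustrative sketch, not part of the diff: the fallback tiers at work on a
# reply that is not valid JSON as a whole; the regex tier still recovers the
# object. `manager` is a hypothetical PersonInfoManager instance.
#
#     raw = '好的!{"nickname": "小明", "reason": "接近原昵称"}'
#     manager._extract_json_from_text(raw)
#     # -> {"nickname": "小明", "reason": "接近原昵称"}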
    async def qv_person_name(self, person_id: str, user_nickname: str, user_cardname: str, user_avatar: str):
        """Pick a name for a user."""
        if not person_id:
            logger.debug("取名失败:person_id不能为空")
            return

        old_name = await self.get_value(person_id, "person_name")
        old_reason = await self.get_value(person_id, "name_reason")

        max_retries = 5  # maximum number of retries
        current_try = 0
        existing_names = ""
        while current_try < max_retries:
            individuality = Individuality.get_instance()
            prompt_personality = individuality.get_prompt(type="personality", x_person=2, level=1)
            bot_name = individuality.personality.bot_nickname

            qv_name_prompt = f"你是{bot_name},你{prompt_personality}"
            qv_name_prompt += f"现在你想给一个用户取一个昵称,用户的qq昵称是{user_nickname},"
            qv_name_prompt += f"用户的qq群昵称是{user_cardname},"
            if user_avatar:
                qv_name_prompt += f"用户的qq头像是{user_avatar},"
            if old_name:
                qv_name_prompt += f"你之前叫他{old_name},是因为{old_reason},"

            qv_name_prompt += "\n请根据以上用户信息,想想你叫他什么比较好,请最好使用用户的qq昵称,可以稍作修改"
            if existing_names:
                qv_name_prompt += f"\n请注意,以下名称已被使用,不要使用以下昵称:{existing_names}。\n"
            qv_name_prompt += "请用json给出你的想法,并给出理由,示例如下:"
            qv_name_prompt += """{
                "nickname": "昵称",
                "reason": "理由"
            }"""
            logger.debug(f"取名提示词:{qv_name_prompt}")
            response = await self.qv_name_llm.generate_response(qv_name_prompt)
            logger.debug(f"取名回复:{response}")
            result = self._extract_json_from_text(response[0])

            if not result["nickname"]:
                logger.error("生成的昵称为空,重试中...")
                current_try += 1
                continue

            # Check whether the generated nickname is already taken
            if result["nickname"] not in self.person_name_list.values():
                # Update the database and the in-memory list
                await self.update_one_field(person_id, "person_name", result["nickname"])
                # await self.update_one_field(person_id, "nickname", user_nickname)
                # await self.update_one_field(person_id, "avatar", user_avatar)
                await self.update_one_field(person_id, "name_reason", result["reason"])

                self.person_name_list[person_id] = result["nickname"]
                logger.debug(f"用户 {person_id} 的名称已更新为 {result['nickname']},原因:{result['reason']}")
                return result
            else:
                existing_names += f"{result['nickname']}、"

                logger.debug(f"生成的昵称 {result['nickname']} 已存在,重试中...")
                current_try += 1

        logger.error(f"在{max_retries}次尝试后仍未能生成唯一昵称")
        return None
    async def del_one_document(self, person_id: str):
        """Delete the document with the given person_id."""

@@ -4,6 +4,8 @@ import math
from bson.decimal128 import Decimal128
from .person_info import person_info_manager
import time
import re
import traceback

relationship_config = LogConfig(
    # Use the relationship-specific log style
@@ -75,6 +77,60 @@ class RelationshipManager:
        else:
            return mood_value / coefficient

    async def is_known_some_one(self, platform, user_id):
        """Check whether this person is known."""
        is_known = person_info_manager.is_person_known(platform, user_id)
        return is_known

    async def is_qved_name(self, platform, user_id):
        """Check whether this person has already been given a name."""
        person_id = person_info_manager.get_person_id(platform, user_id)
        is_qved = await person_info_manager.has_one_field(person_id, "person_name")
        old_name = await person_info_manager.get_value(person_id, "person_name")
        print(f"old_name: {old_name}")
        print(f"is_qved: {is_qved}")
        if is_qved and old_name is not None:
            return True
        else:
            return False

    async def first_knowing_some_one(self, platform, user_id, user_nickname, user_cardname, user_avatar):
        """Handle meeting someone for the first time."""
        person_id = person_info_manager.get_person_id(platform, user_id)
        await person_info_manager.update_one_field(person_id, "nickname", user_nickname)
        # await person_info_manager.update_one_field(person_id, "user_cardname", user_cardname)
        # await person_info_manager.update_one_field(person_id, "user_avatar", user_avatar)
        await person_info_manager.qv_person_name(person_id, user_nickname, user_cardname, user_avatar)
    async def convert_all_person_sign_to_person_name(self, input_text: str):
        """Convert every <platform:user_id:nickname:cardname> marker into a person_name."""
        try:
            # Match the <platform:user_id:nickname:cardname> format with a regex
            all_person = person_info_manager.person_name_list

            pattern = r"<([^:]+):(\d+):([^:]+):([^>]+)>"
            matches = re.findall(pattern, input_text)

            # Walk the matches and replace each <platform:user_id:nickname:cardname> with its person_name
            result_text = input_text
            for platform, user_id, nickname, cardname in matches:
                person_id = person_info_manager.get_person_id(platform, user_id)
                # Fall back to the nickname (or, failing that, the card name) as the person name
                person_name = nickname.strip() if nickname.strip() else cardname.strip()

                if person_id in all_person:
                    if all_person[person_id] is not None:
                        person_name = all_person[person_id]

                print(f"将<{platform}:{user_id}:{nickname}:{cardname}>替换为{person_name}")

                result_text = result_text.replace(f"<{platform}:{user_id}:{nickname}:{cardname}>", person_name)

            return result_text
        except Exception:
            logger.error(traceback.format_exc())
            return input_text
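# Illustrative sketch, not part of the diff: what the conversion does to a
# marked-up string, assuming user 12345 has the stored name "小明" and
# `relationship_manager` is a hypothetical RelationshipManager instance.
#
#     text = "刚才<qq:12345:张三:三哥>说了什么?"
#     await relationship_manager.convert_all_person_sign_to_person_name(text)
#     # -> "刚才小明说了什么?"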
    async def calculate_update_relationship_value(self, chat_stream: ChatStream, label: str, stance: str) -> tuple:
        """Compute and apply the relationship-value change.

        The new calculation works as follows:
@@ -38,7 +38,7 @@ else:
    print("将使用默认配置")


class PersonalityEvaluator_direct:
class PersonalityEvaluatorDirect:
    def __init__(self):
        self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
        self.scenarios = []

@@ -135,7 +135,7 @@ def main():
    print("\n准备好了吗?按回车键开始...")
    input()

    evaluator = PersonalityEvaluator_direct()
    evaluator = PersonalityEvaluatorDirect()
    final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
    dimension_counts = {trait: 0 for trait in final_scores.keys()}
@@ -125,11 +125,12 @@ def main():
    if global_config.remote_enable:
        """Main function: start the heartbeat thread."""
        # Configuration
        SERVER_URL = "http://hyybuth.xyz:10058"
        HEARTBEAT_INTERVAL = 300  # 5 minutes (in seconds)
        server_url = "http://hyybuth.xyz:10058"
        # server_url = "http://localhost:10058"
        heartbeat_interval = 300  # 5 minutes (in seconds)

        # Create and start the heartbeat thread
        heartbeat_thread = HeartbeatThread(SERVER_URL, HEARTBEAT_INTERVAL)
        heartbeat_thread = HeartbeatThread(server_url, heartbeat_interval)
        heartbeat_thread.start()

        return heartbeat_thread  # return the thread object so callers can control it
@@ -11,7 +11,7 @@ sys.path.append(root_path)

from src.common.database import db  # noqa: E402
from src.common.logger import get_module_logger, SCHEDULE_STYLE_CONFIG, LogConfig  # noqa: E402
from src.plugins.models.utils_model import LLM_request  # noqa: E402
from src.plugins.models.utils_model import LLMRequest  # noqa: E402
from src.plugins.config.config import global_config  # noqa: E402

TIME_ZONE = tz.gettz(global_config.TIME_ZONE)  # set the time zone
@@ -30,13 +30,13 @@ class ScheduleGenerator:

    def __init__(self):
        # Use the offline LLM model
        self.llm_scheduler_all = LLM_request(
        self.llm_scheduler_all = LLMRequest(
            model=global_config.llm_reasoning,
            temperature=global_config.SCHEDULE_TEMPERATURE + 0.3,
            max_tokens=7000,
            request_type="schedule",
        )
        self.llm_scheduler_doing = LLM_request(
        self.llm_scheduler_doing = LLMRequest(
            model=global_config.llm_normal,
            temperature=global_config.SCHEDULE_TEMPERATURE,
            max_tokens=2048,
@@ -1,7 +1,7 @@
from typing import List, Optional


from ..models.utils_model import LLM_request
from ..models.utils_model import LLMRequest
from ..config.config import global_config
from src.common.logger import get_module_logger, LogConfig, TOPIC_STYLE_CONFIG

@@ -17,7 +17,7 @@ logger = get_module_logger("topic_identifier", config=topic_config)

class TopicIdentifier:
    def __init__(self):
        self.llm_topic_judge = LLM_request(model=global_config.llm_topic_judge, request_type="topic")
        self.llm_topic_judge = LLMRequest(model=global_config.llm_topic_judge, request_type="topic")

    async def identify_topic_llm(self, text: str) -> Optional[List[str]]:
        """Identify the topics of a message and return them as a list."""
@@ -119,7 +119,7 @@ class Prompt(str):

        # Parse the template
        template_args = []
        result = re.findall(r"\{(.*?)\}", processed_fstr)
        result = re.findall(r"\{(.*?)}", processed_fstr)
        for expr in result:
            if expr and expr not in template_args:
                template_args.append(expr)
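# Editor's note, not part of the diff: outside a character class, "}" needs no
# escaping in Python regexes, so r"\{(.*?)}" matches exactly the same strings
# as r"\{(.*?)\}"; this change, and the identical one in the hunk below, is
# purely cosmetic.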
@@ -164,7 +164,7 @@ class Prompt(str):
        processed_template = cls._process_escaped_braces(template)

        template_args = []
        result = re.findall(r"\{(.*?)\}", processed_template)
        result = re.findall(r"\{(.*?)}", processed_template)
        for expr in result:
            if expr and expr not in template_args:
                template_args.append(expr)
@@ -175,13 +175,8 @@ class LLMStatistics:

    def _format_stats_section(self, stats: Dict[str, Any], title: str) -> str:
        """Format one statistics section for output."""
        output = []
        output = ["\n" + "-" * 84, f"{title}", "-" * 84, f"总请求数: {stats['total_requests']}"]

        output.append("\n" + "-" * 84)
        output.append(f"{title}")
        output.append("-" * 84)

        output.append(f"总请求数: {stats['total_requests']}")
        if stats["total_requests"] > 0:
            output.append(f"总Token数: {stats['total_tokens']}")
            output.append(f"总花费: {stats['total_cost']:.4f}¥")
@@ -238,11 +233,7 @@ class LLMStatistics:

    def _format_stats_section_lite(self, stats: Dict[str, Any], title: str) -> str:
        """Format one statistics section for output."""
        output = []

        output.append("\n" + "-" * 84)
        output.append(f"{title}")
        output.append("-" * 84)
        output = ["\n" + "-" * 84, f"{title}", "-" * 84]

        # output.append(f"总请求数: {stats['total_requests']}")
        if stats["total_requests"] > 0:
@@ -303,8 +294,7 @@ class LLMStatistics:
        """Save the statistics report to a file."""
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        output = []
        output.append(f"LLM请求统计报告 (生成时间: {current_time})")
        output = [f"LLM请求统计报告 (生成时间: {current_time})"]

        # Add the statistics for each time window
        sections = [