From af962c2e84ed68c6cadb54d5d930a3beb1cd644f Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Tue, 11 Mar 2025 16:50:40 +0800
Subject: [PATCH] Fix the mood manager not being imported correctly, which
 prevented messages from being sent
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

1
---
 src/plugins/chat/bot.py             | 11 +++++--
 src/plugins/chat/llm_generator.py   |  5 +++
 src/plugins/chat/message_sender.py  |  9 +++---
 src/plugins/chat/utils.py           | 48 ++++++++++++++---------------
 src/plugins/chat/utils_image.py     |  4 +++
 src/plugins/memory_system/memory.py |  2 +-
 6 files changed, 48 insertions(+), 31 deletions(-)

diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 6f18e4da0..81361d81b 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -163,12 +163,16 @@ class ChatBot:
 
         response,raw_content = await self.gpt.generate_response(message)
 
+        # print(f"response: {response}")
         if response:
+            # print(f"有response: {response}")
             container = message_manager.get_container(chat.stream_id)
             thinking_message = None
             # 找到message,删除
+            # print(f"开始找思考消息")
             for msg in container.messages:
                 if isinstance(msg, MessageThinking) and msg.message_info.message_id == think_id:
+                    # print(f"找到思考消息: {msg}")
                     thinking_message = msg
                     container.messages.remove(msg)
                     break
@@ -186,14 +190,14 @@ class ChatBot:
 
             mark_head = False
             for msg in response:
-                print("test")
                 # print(f"\033[1;32m[回复内容]\033[0m {msg}")
                 # 通过时间改变时间戳
                 typing_time = calculate_typing_time(msg)
+                print(f"typing_time: {typing_time}")
                 accu_typing_time += typing_time
                 timepoint = tinking_time_point + accu_typing_time
-
                 message_segment = Seg(type='text', data=msg)
+                print(f"message_segment: {message_segment}")
                 bot_message = MessageSending(
                     message_id=think_id,
                     chat_stream=chat,
@@ -203,12 +207,15 @@ class ChatBot:
                     is_head=not mark_head,
                     is_emoji=False
                 )
+                print(f"bot_message: {bot_message}")
                 if not mark_head:
                     mark_head = True
+                print(f"添加消息到message_set: {bot_message}")
                 message_set.add_message(bot_message)
 
             # message_set 可以直接加入 message_manager
             # print(f"\033[1;32m[回复]\033[0m 将回复载入发送容器")
+            print(f"添加message_set到message_manager")
             message_manager.add_message(message_set)
 
             bot_response_time = tinking_time_point

diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 517e8aa7a..af7334afe 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -63,6 +63,9 @@ class ResponseGenerator:
         )
 
         raw_content = model_response
+        # print(f"raw_content: {raw_content}")
+        # print(f"model_response: {model_response}")
+
         if model_response:
             logger.info(f'{global_config.BOT_NICKNAME}的回复是:{model_response}')
             model_response = await self._process_response(model_response)
@@ -200,6 +203,8 @@ class ResponseGenerator:
             return None, []
 
         processed_response = process_llm_response(content)
+
+        # print(f"得到了处理后的llm返回{processed_response}")
 
         return processed_response
 
diff --git a/src/plugins/chat/message_sender.py b/src/plugins/chat/message_sender.py
index d5f710bbf..9db74633f 100644
--- a/src/plugins/chat/message_sender.py
+++ b/src/plugins/chat/message_sender.py
@@ -149,6 +149,7 @@ class MessageManager:
         """处理聊天流消息"""
         container = self.get_container(chat_id)
         if container.has_messages():
+            # print(f"处理有message的容器chat_id: {chat_id}")
             message_earliest = container.get_earliest_message()
 
             if isinstance(message_earliest, MessageThinking):
@@ -161,15 +162,15 @@ class MessageManager:
                     logger.warning(f"消息思考超时({thinking_time}秒),移除该消息")
                     container.remove_message(message_earliest)
                 else:
-                    print(f"\033[1;34m[调试]\033[0m 消息'{message_earliest.processed_plain_text}'正在发送中")
+
                     if message_earliest.is_head and message_earliest.update_thinking_time() > 30:
                         await message_sender.send_message(message_earliest.set_reply())
                     else:
                         await message_sender.send_message(message_earliest)
-
-                    # if message_earliest.is_emoji:
-                    #     message_earliest.processed_plain_text = "[表情包]"
                     await message_earliest.process()
+
+                    print(f"\033[1;34m[调试]\033[0m 消息'{message_earliest.processed_plain_text}'正在发送中")
+
                     await self.storage.store_message(message_earliest, message_earliest.chat_stream,None)
                     container.remove_message(message_earliest)
 
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index a889ef177..55fb9eb43 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -15,6 +15,7 @@ from .config import global_config
 from .message import MessageThinking, MessageRecv,MessageSending,MessageProcessBase,Message
 from .message_base import MessageBase,BaseMessageInfo,UserInfo,GroupInfo
 from .chat_stream import ChatStream
+from ..moods.moods import MoodManager
 
 driver = get_driver()
 config = driver.config
@@ -72,43 +73,42 @@ def calculate_information_content(text)
 
 
 def get_cloest_chat_from_db(db, length: int, timestamp: str):
-    """从数据库中获取最接近指定时间戳的聊天记录,并记录读取次数
+    """从数据库中获取最接近指定时间戳的聊天记录
 
+    Args:
+        db: 数据库实例
+        length: 要获取的消息数量
+        timestamp: 时间戳
+
     Returns:
-        list: 消息记录字典列表,每个字典包含消息内容和时间信息
+        list: 消息记录列表,每个记录包含时间和文本信息
     """
     chat_records = []
     closest_record = db.db.messages.find_one({"time": {"$lte": timestamp}}, sort=[('time', -1)])
 
-    if closest_record and closest_record.get('memorized', 0) < 4:
+    if closest_record:
         closest_time = closest_record['time']
-        chat_id = closest_record['chat_id']  # 获取groupid
-        # 获取该时间戳之后的length条消息,且groupid相同
+        chat_id = closest_record['chat_id']  # 获取chat_id
+        # 获取该时间戳之后的length条消息,保持相同的chat_id
         chat_records = list(db.db.messages.find(
-            {"time": {"$gt": closest_time}, "chat_id": chat_id}
+            {
+                "time": {"$gt": closest_time},
+                "chat_id": chat_id  # 添加chat_id过滤
+            }
        ).sort('time', 1).limit(length))
 
-        # 更新每条消息的memorized属性
-        for record in records:
-            current_memorized = record.get('memorized', 0)
-            if current_memorized > 3:
-                print("消息已读取3次,跳过")
-                return ''
-
-            # 更新memorized值
-            db.db.messages.update_one(
-                {"_id": record["_id"]},
-                {"$set": {"memorized": current_memorized + 1}}
-            )
-
-            # 添加到记录列表中
-            chat_records.append({
-                'text': record["detailed_plain_text"],
+        # 转换记录格式
+        formatted_records = []
+        for record in chat_records:
+            formatted_records.append({
                 'time': record["time"],
-                'group_id': record["group_id"]
+                'chat_id': record["chat_id"],
+                'detailed_plain_text': record.get("detailed_plain_text", "")  # 添加文本内容
             })
 
-    return chat_records
+        return formatted_records
+
+    return []
 
 
 async def get_recent_group_messages(db, chat_id:str, limit: int = 12) -> list:

diff --git a/src/plugins/chat/utils_image.py b/src/plugins/chat/utils_image.py
index ac3ff5ac4..208cbf15d 100644
--- a/src/plugins/chat/utils_image.py
+++ b/src/plugins/chat/utils_image.py
@@ -315,6 +315,10 @@ class ImageManager:
             prompt = "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来。并尝试猜测这个图片的含义。最多200个字。"
             description, _ = await self._llm.generate_response_for_image(prompt, image_base64)
 
+            if description is None:
+                logger.warning("AI未能生成图片描述")
+                return "[图片]"
+
             # 根据配置决定是否保存图片
             if global_config.EMOJI_SAVE:
                 # 生成文件名和路径

diff --git a/src/plugins/memory_system/memory.py b/src/plugins/memory_system/memory.py
index 0730f9e57..d122252fb 100644
--- a/src/plugins/memory_system/memory.py
+++ b/src/plugins/memory_system/memory.py
@@ -238,7 +238,7 @@ class Hippocampus:
 
         time_info += f"是从 {earliest_str} 到 {latest_str} 的对话:\n"
         for msg in messages:
-            input_text += f"{msg['text']}\n"
+            input_text += f"{msg['detailed_plain_text']}\n"
 
         logger.debug(input_text)
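
Note on the data shape (an editorial sketch, not part of the patch): after this change, get_cloest_chat_from_db in utils.py returns records keyed by 'time', 'chat_id' and 'detailed_plain_text' instead of 'text'/'time'/'group_id', which is why memory.py switches to msg['detailed_plain_text']. A minimal illustration of the new record shape and how it is consumed, with invented sample values (only the field names come from the diff):

# Sketch only: the sample record below is hypothetical; the field names mirror the patch.
formatted_records = [
    {
        "time": 1741682400.0,                  # message timestamp
        "chat_id": "example_chat",             # hypothetical chat id
        "detailed_plain_text": "用户A: 你好",    # message text now lives under this key
    },
]

input_text = ""
for msg in formatted_records:
    # mirrors the updated line in Hippocampus (memory.py)
    input_text += f"{msg['detailed_plain_text']}\n"
print(input_text)

Any other caller still reading the old 'text' or 'group_id' keys would need the same rename.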