diff --git a/README.md b/README.md
index c14ac646e..30e41a368 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,7 @@
- [一群](https://qm.qq.com/q/VQ3XZrWgMs) 766798517 ,建议加下面的(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
- [二群](https://qm.qq.com/q/RzmCiRtHEW) 571780722 (开发和建议相关讨论)不一定有空回复,会优先写文档和代码
- [三群](https://qm.qq.com/q/wlH5eT8OmQ) 1035228475(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
+- [四群](https://qm.qq.com/q/wlH5eT8OmQ) 729957033(开发和建议相关讨论)不一定有空回复,会优先写文档和代码
@@ -57,7 +58,7 @@
📚 文档 ⬇️ 快速开始使用麦麦 ⬇️
-### 部署方式
+### 部署方式(由于开发繁忙,部分内容可能已过时)
- 📦 **Windows 一键傻瓜式部署**:请运行项目根目录中的 `run.bat`,部署完成后请参照后续配置指南进行配置
diff --git a/bot.py b/bot.py
index a3a844a15..acc7990ed 100644
--- a/bot.py
+++ b/bot.py
@@ -8,14 +8,21 @@ import time
import uvicorn
from dotenv import load_dotenv
-from loguru import logger
from nonebot.adapters.onebot.v11 import Adapter
import platform
+from src.plugins.utils.logger_config import setup_logger
+
+from loguru import logger
+
# 获取没有加载env时的环境变量
env_mask = {key: os.getenv(key) for key in os.environ}
uvicorn_server = None
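+# 模块级引用,供 __main__ 中的启动与优雅关闭流程使用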
+driver = None
+app = None
+loop = None
def easter_egg():
@@ -95,43 +102,7 @@ def load_env():
def load_logger():
- logger.remove()
-
- # 配置日志基础路径
- log_path = os.path.join(os.getcwd(), "logs")
- if not os.path.exists(log_path):
- os.makedirs(log_path)
-
- current_env = os.getenv("ENVIRONMENT", "dev")
-
- # 公共配置参数
- log_level = os.getenv("LOG_LEVEL", "INFO" if current_env == "prod" else "DEBUG")
- log_filter = lambda record: (
- ("nonebot" not in record["name"] or record["level"].no >= logger.level("ERROR").no)
- if current_env == "prod"
- else True
- )
- log_format = (
- "{time:YYYY-MM-DD HH:mm:ss.SSS} "
- "|> {level: <7} "
- "|> {name:.<8}:{function:.<8}:{line: >4} "
- "-> {message}"
- )
-
- # 日志文件储存至/logs
- logger.add(
- os.path.join(log_path, "maimbot_{time:YYYY-MM-DD}.log"),
- rotation="00:00",
- retention="30 days",
- format=log_format,
- colorize=False,
- level=log_level,
- filter=log_filter,
- encoding="utf-8",
- )
-
- # 终端输出
- logger.add(sys.stderr, format=log_format, colorize=True, level=log_level, filter=log_filter)
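+    # 日志配置已集中到 src/plugins/utils/logger_config.py,这里仅做一次初始化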
+ setup_logger()
def scan_provider(env_config: dict):
@@ -203,11 +174,14 @@ def raw_main():
if platform.system().lower() != "windows":
time.tzset()
+ # 配置日志
+ load_logger()
easter_egg()
init_config()
init_env()
load_env()
- load_logger()
+
env_config = {key: os.getenv(key) for key in os.environ}
scan_provider(env_config)
@@ -235,17 +209,21 @@ if __name__ == "__main__":
try:
raw_main()
- global app
app = nonebot.get_asgi()
-
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
- loop.run_until_complete(uvicorn_main())
- except KeyboardInterrupt:
- logger.warning("麦麦会努力做的更好的!正在停止中......")
+
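+        # 内层 try 捕获 KeyboardInterrupt:先执行 graceful_shutdown,再在 finally 中关闭事件循环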
+ try:
+ loop.run_until_complete(uvicorn_main())
+ except KeyboardInterrupt:
+ logger.warning("收到中断信号,正在优雅关闭...")
+ loop.run_until_complete(graceful_shutdown())
+ finally:
+ loop.close()
+
except Exception as e:
- logger.error(f"主程序异常: {e}")
- finally:
- loop.run_until_complete(graceful_shutdown())
- loop.close()
- logger.info("进程终止完毕,麦麦开始休眠......下次再见哦!")
+ logger.error(f"主程序异常: {str(e)}")
+ if loop and not loop.is_closed():
+ loop.run_until_complete(graceful_shutdown())
+ loop.close()
+ sys.exit(1)
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index b90b3d0f3..4290b1f40 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -1,7 +1,6 @@
import re
import time
from random import random
-from loguru import logger
from nonebot.adapters.onebot.v11 import (
Bot,
GroupMessageEvent,
@@ -30,6 +29,10 @@ from .utils_image import image_path_to_base64
from .utils_user import get_user_nickname, get_user_cardname, get_groupname
from .willing_manager import willing_manager # 导入意愿管理器
from .message_base import UserInfo, GroupInfo, Seg
+from ..utils.logger_config import setup_logger, LogModule
+
+# 配置日志
+logger = setup_logger(LogModule.CHAT)
class ChatBot:
diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py
index 76437f8f2..5d6b0bca0 100644
--- a/src/plugins/chat/emoji_manager.py
+++ b/src/plugins/chat/emoji_manager.py
@@ -18,11 +18,17 @@ from ..chat.utils import get_embedding
from ..chat.utils_image import ImageManager, image_path_to_base64
from ..models.utils_model import LLM_request
+from ..utils.logger_config import setup_logger, LogModule
+
+# 配置日志
+logger = setup_logger(LogModule.EMOJI)
+
driver = get_driver()
config = driver.config
image_manager = ImageManager()
+
class EmojiManager:
_instance = None
EMOJI_DIR = "data/emoji" # 表情包存储目录
@@ -154,20 +160,20 @@ class EmojiManager:
# 更新使用次数
db.emoji.update_one({"_id": selected_emoji["_id"]}, {"$inc": {"usage_count": 1}})
- logger.success(
- f"找到匹配的表情包: {selected_emoji.get('description', '无描述')} (相似度: {similarity:.4f})"
+ logger.info(
+ f"[匹配] 找到表情包: {selected_emoji.get('description', '无描述')} (相似度: {similarity:.4f})"
)
# 稍微改一下文本描述,不然容易产生幻觉,描述已经包含 表情包 了
return selected_emoji["path"], "[ %s ]" % selected_emoji.get("description", "无描述")
except Exception as search_error:
- logger.error(f"搜索表情包失败: {str(search_error)}")
+ logger.error(f"[错误] 搜索表情包失败: {str(search_error)}")
return None
return None
except Exception as e:
- logger.error(f"获取表情包失败: {str(e)}")
+ logger.error(f"[错误] 获取表情包失败: {str(e)}")
return None
async def _get_emoji_discription(self, image_base64: str) -> str:
@@ -181,7 +187,7 @@ class EmojiManager:
return description
except Exception as e:
- logger.error(f"获取标签失败: {str(e)}")
+ logger.error(f"[错误] 获取表情包描述失败: {str(e)}")
return None
async def _check_emoji(self, image_base64: str, image_format: str) -> str:
@@ -189,11 +195,11 @@ class EmojiManager:
prompt = f'这是一个表情包,请回答这个表情包是否满足"{global_config.EMOJI_CHECK_PROMPT}"的要求,是则回答是,否则回答否,不要出现任何其他内容'
content, _ = await self.vlm.generate_response_for_image(prompt, image_base64, image_format)
- logger.debug(f"输出描述: {content}")
+ logger.debug(f"[检查] 表情包检查结果: {content}")
return content
except Exception as e:
- logger.error(f"获取标签失败: {str(e)}")
+ logger.error(f"[错误] 表情包检查失败: {str(e)}")
return None
async def _get_kimoji_for_text(self, text: str):
@@ -201,11 +207,11 @@ class EmojiManager:
prompt = f'这是{global_config.BOT_NICKNAME}将要发送的消息内容:\n{text}\n若要为其配上表情包,请你输出这个表情包应该表达怎样的情感,应该给人什么样的感觉,不要太简洁也不要太长,注意不要输出任何对消息内容的分析内容,只输出"一种什么样的感觉"中间的形容词部分。'
content, _ = await self.llm_emotion_judge.generate_response_async(prompt, temperature=1.5)
- logger.info(f"输出描述: {content}")
+ logger.info(f"[情感] 表情包情感描述: {content}")
return content
except Exception as e:
- logger.error(f"获取标签失败: {str(e)}")
+ logger.error(f"[错误] 获取表情包情感失败: {str(e)}")
return None
async def scan_new_emojis(self):
@@ -252,7 +258,7 @@ class EmojiManager:
db.images.update_one({"hash": image_hash}, {"$set": image_doc}, upsert=True)
# 保存描述到image_descriptions集合
image_manager._save_description_to_db(image_hash, description, "emoji")
- logger.success(f"同步已存在的表情包到images集合: {filename}")
+ logger.success(f"[同步] 已同步表情包到images集合: {filename}")
continue
# 检查是否在images集合中已有描述
@@ -268,15 +274,10 @@ class EmojiManager:
check = await self._check_emoji(image_base64, image_format)
if "是" not in check:
os.remove(image_path)
- logger.info(f"描述: {description}")
-
- logger.info(f"描述: {description}")
- logger.info(f"其不满足过滤规则,被剔除 {check}")
+ logger.info(f"[过滤] 表情包描述: {description}")
+ logger.info(f"[过滤] 表情包不满足规则,已移除: {check}")
continue
- logger.info(f"check通过 {check}")
-
- if description is not None:
- embedding = await get_embedding(description)
+ logger.info(f"[检查] 表情包检查通过: {check}")
if description is not None:
embedding = await get_embedding(description)
@@ -293,8 +294,8 @@ class EmojiManager:
# 保存到emoji数据库
db["emoji"].insert_one(emoji_record)
- logger.success(f"注册新表情包: {filename}")
- logger.info(f"描述: {description}")
+ logger.success(f"[注册] 新表情包: {filename}")
+ logger.info(f"[描述] {description}")
# 保存到images数据库
image_doc = {
@@ -307,17 +308,17 @@ class EmojiManager:
db.images.update_one({"hash": image_hash}, {"$set": image_doc}, upsert=True)
# 保存描述到image_descriptions集合
image_manager._save_description_to_db(image_hash, description, "emoji")
- logger.success(f"同步保存到images集合: {filename}")
+ logger.success(f"[同步] 已保存到images集合: {filename}")
else:
- logger.warning(f"跳过表情包: {filename}")
+ logger.warning(f"[跳过] 表情包: {filename}")
except Exception:
- logger.exception("扫描表情包失败")
+ logger.exception("[错误] 扫描表情包失败")
async def _periodic_scan(self, interval_MINS: int = 10):
"""定期扫描新表情包"""
while True:
- logger.info("开始扫描新表情包...")
+ logger.info("[扫描] 开始扫描新表情包...")
await self.scan_new_emojis()
await asyncio.sleep(interval_MINS * 60) # 每600秒扫描一次
@@ -335,48 +336,48 @@ class EmojiManager:
for emoji in all_emojis:
try:
if "path" not in emoji:
- logger.warning(f"发现无效记录(缺少path字段),ID: {emoji.get('_id', 'unknown')}")
+ logger.warning(f"[检查] 发现无效记录(缺少path字段),ID: {emoji.get('_id', 'unknown')}")
db.emoji.delete_one({"_id": emoji["_id"]})
removed_count += 1
continue
if "embedding" not in emoji:
- logger.warning(f"发现过时记录(缺少embedding字段),ID: {emoji.get('_id', 'unknown')}")
+ logger.warning(f"[检查] 发现过时记录(缺少embedding字段),ID: {emoji.get('_id', 'unknown')}")
db.emoji.delete_one({"_id": emoji["_id"]})
removed_count += 1
continue
# 检查文件是否存在
if not os.path.exists(emoji["path"]):
- logger.warning(f"表情包文件已被删除: {emoji['path']}")
+ logger.warning(f"[检查] 表情包文件已被删除: {emoji['path']}")
# 从数据库中删除记录
result = db.emoji.delete_one({"_id": emoji["_id"]})
if result.deleted_count > 0:
- logger.debug(f"成功删除数据库记录: {emoji['_id']}")
+ logger.debug(f"[清理] 成功删除数据库记录: {emoji['_id']}")
removed_count += 1
else:
- logger.error(f"删除数据库记录失败: {emoji['_id']}")
+ logger.error(f"[错误] 删除数据库记录失败: {emoji['_id']}")
continue
if "hash" not in emoji:
- logger.warning(f"发现缺失记录(缺少hash字段),ID: {emoji.get('_id', 'unknown')}")
+ logger.warning(f"[检查] 发现缺失记录(缺少hash字段),ID: {emoji.get('_id', 'unknown')}")
hash = hashlib.md5(open(emoji["path"], "rb").read()).hexdigest()
db.emoji.update_one({"_id": emoji["_id"]}, {"$set": {"hash": hash}})
except Exception as item_error:
- logger.error(f"处理表情包记录时出错: {str(item_error)}")
+ logger.error(f"[错误] 处理表情包记录时出错: {str(item_error)}")
continue
# 验证清理结果
remaining_count = db.emoji.count_documents({})
if removed_count > 0:
- logger.success(f"已清理 {removed_count} 个失效的表情包记录")
- logger.info(f"清理前总数: {total_count} | 清理后总数: {remaining_count}")
+ logger.success(f"[清理] 已清理 {removed_count} 个失效的表情包记录")
+ logger.info(f"[统计] 清理前: {total_count} | 清理后: {remaining_count}")
else:
- logger.info(f"已检查 {total_count} 个表情包记录")
+ logger.info(f"[检查] 已检查 {total_count} 个表情包记录")
except Exception as e:
- logger.error(f"检查表情包完整性失败: {str(e)}")
+ logger.error(f"[错误] 检查表情包完整性失败: {str(e)}")
logger.error(traceback.format_exc())
async def start_periodic_check(self, interval_MINS: int = 120):
diff --git a/src/plugins/memory_system/memory.py b/src/plugins/memory_system/memory.py
index f87f037d5..f5c7181b3 100644
--- a/src/plugins/memory_system/memory.py
+++ b/src/plugins/memory_system/memory.py
@@ -8,9 +8,8 @@ import os
import jieba
import networkx as nx
-from loguru import logger
from nonebot import get_driver
-from ...common.database import db # 使用正确的导入语法
+from ...common.database import db
from ..chat.config import global_config
from ..chat.utils import (
calculate_information_content,
@@ -20,6 +19,13 @@ from ..chat.utils import (
)
from ..models.utils_model import LLM_request
+from ..utils.logger_config import setup_logger, LogModule
+
+# 配置日志
+logger = setup_logger(LogModule.MEMORY)
+
+logger.info("初始化记忆系统")
+
class Memory_graph:
def __init__(self):
self.G = nx.Graph() # 使用 networkx 的图结构
@@ -471,7 +477,7 @@ class Hippocampus:
{'concept': concept},
{'$set': update_data}
)
- logger.info(f"为节点 {concept} 添加缺失的时间字段")
+ logger.info(f"[时间更新] 节点 {concept} 添加缺失的时间字段")
# 获取时间信息(如果不存在则使用当前时间)
created_time = node.get('created_time', current_time)
@@ -504,7 +510,7 @@ class Hippocampus:
{'source': source, 'target': target},
{'$set': update_data}
)
- logger.info(f"为边 {source} - {target} 添加缺失的时间字段")
+ logger.info(f"[时间更新] 边 {source} - {target} 添加缺失的时间字段")
# 获取时间信息(如果不存在则使用当前时间)
created_time = edge.get('created_time', current_time)
@@ -518,16 +524,27 @@ class Hippocampus:
last_modified=last_modified)
if need_update:
- logger.success("已为缺失的时间字段进行补充")
+ logger.success("[数据库] 已为缺失的时间字段进行补充")
async def operation_forget_topic(self, percentage=0.1):
"""随机选择图中一定比例的节点和边进行检查,根据时间条件决定是否遗忘"""
# 检查数据库是否为空
+        logger.info("[遗忘] 开始检查数据库...")
+
all_nodes = list(self.memory_graph.G.nodes())
all_edges = list(self.memory_graph.G.edges())
if not all_nodes and not all_edges:
- logger.info("记忆图为空,无需进行遗忘操作")
+ logger.info("[遗忘] 记忆图为空,无需进行遗忘操作")
return
check_nodes_count = max(1, int(len(all_nodes) * percentage))
@@ -542,35 +559,32 @@ class Hippocampus:
current_time = datetime.datetime.now().timestamp()
# 检查并遗忘连接
- logger.info("开始检查连接...")
+ logger.info("[遗忘] 开始检查连接...")
for source, target in edges_to_check:
edge_data = self.memory_graph.G[source][target]
last_modified = edge_data.get('last_modified')
- # print(source,target)
- # print(f"float(last_modified):{float(last_modified)}" )
- # print(f"current_time:{current_time}")
- # print(f"current_time - last_modified:{current_time - last_modified}")
- if current_time - last_modified > 3600*global_config.memory_forget_time: # test
+
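+            # 超过遗忘时间窗(memory_forget_time 小时)则连接强度减 1,降到 0 时移除该连接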
+ if current_time - last_modified > 3600*global_config.memory_forget_time:
current_strength = edge_data.get('strength', 1)
new_strength = current_strength - 1
if new_strength <= 0:
self.memory_graph.G.remove_edge(source, target)
edge_changes['removed'] += 1
- logger.info(f"\033[1;31m[连接移除]\033[0m {source} - {target}")
+ logger.info(f"[遗忘] 连接移除: {source} -> {target}")
else:
edge_data['strength'] = new_strength
edge_data['last_modified'] = current_time
edge_changes['weakened'] += 1
- logger.info(f"\033[1;34m[连接减弱]\033[0m {source} - {target} (强度: {current_strength} -> {new_strength})")
+ logger.info(f"[遗忘] 连接减弱: {source} -> {target} (强度: {current_strength} -> {new_strength})")
# 检查并遗忘话题
- logger.info("开始检查节点...")
+ logger.info("[遗忘] 开始检查节点...")
for node in nodes_to_check:
node_data = self.memory_graph.G.nodes[node]
last_modified = node_data.get('last_modified', current_time)
- if current_time - last_modified > 3600*24: # test
+ if current_time - last_modified > 3600*24:
memory_items = node_data.get('memory_items', [])
if not isinstance(memory_items, list):
memory_items = [memory_items] if memory_items else []
@@ -584,27 +598,22 @@ class Hippocampus:
self.memory_graph.G.nodes[node]['memory_items'] = memory_items
self.memory_graph.G.nodes[node]['last_modified'] = current_time
node_changes['reduced'] += 1
- logger.info(f"\033[1;33m[记忆减少]\033[0m {node} (记忆数量: {current_count} -> {len(memory_items)})")
+ logger.info(f"[遗忘] 记忆减少: {node} (数量: {current_count} -> {len(memory_items)})")
else:
self.memory_graph.G.remove_node(node)
node_changes['removed'] += 1
- logger.info(f"\033[1;31m[节点移除]\033[0m {node}")
+ logger.info(f"[遗忘] 节点移除: {node}")
if any(count > 0 for count in edge_changes.values()) or any(count > 0 for count in node_changes.values()):
self.sync_memory_to_db()
- logger.info("\n遗忘操作统计:")
- logger.info(f"连接变化: {edge_changes['weakened']} 个减弱, {edge_changes['removed']} 个移除")
- logger.info(f"节点变化: {node_changes['reduced']} 个减少记忆, {node_changes['removed']} 个移除")
+ logger.info("[遗忘] 统计信息:")
+ logger.info(f"[遗忘] 连接变化: {edge_changes['weakened']} 个减弱, {edge_changes['removed']} 个移除")
+ logger.info(f"[遗忘] 节点变化: {node_changes['reduced']} 个减少记忆, {node_changes['removed']} 个移除")
else:
- logger.info("\n本次检查没有节点或连接满足遗忘条件")
+ logger.info("[遗忘] 本次检查没有节点或连接满足遗忘条件")
async def merge_memory(self, topic):
- """
- 对指定话题的记忆进行合并压缩
-
- Args:
- topic: 要合并的话题节点
- """
+ """对指定话题的记忆进行合并压缩"""
# 获取节点的记忆项
memory_items = self.memory_graph.G.nodes[topic].get('memory_items', [])
if not isinstance(memory_items, list):
@@ -619,8 +628,8 @@ class Hippocampus:
# 拼接成文本
merged_text = "\n".join(selected_memories)
- logger.debug(f"\n[合并记忆] 话题: {topic}")
- logger.debug(f"选择的记忆:\n{merged_text}")
+ logger.debug(f"[合并] 话题: {topic}")
+ logger.debug(f"[合并] 选择的记忆:\n{merged_text}")
# 使用memory_compress生成新的压缩记忆
compressed_memories, _ = await self.memory_compress(selected_memories, 0.1)
@@ -632,11 +641,11 @@ class Hippocampus:
# 添加新的压缩记忆
for _, compressed_memory in compressed_memories:
memory_items.append(compressed_memory)
- logger.info(f"添加压缩记忆: {compressed_memory}")
+ logger.info(f"[合并] 添加压缩记忆: {compressed_memory}")
# 更新节点的记忆项
self.memory_graph.G.nodes[topic]['memory_items'] = memory_items
- logger.debug(f"完成记忆合并,当前记忆数量: {len(memory_items)}")
+ logger.debug(f"[合并] 完成记忆合并,当前记忆数量: {len(memory_items)}")
async def operation_merge_memory(self, percentage=0.1):
"""
@@ -766,7 +775,7 @@ class Hippocampus:
async def memory_activate_value(self, text: str, max_topics: int = 5, similarity_threshold: float = 0.3) -> int:
"""计算输入文本对记忆的激活程度"""
- logger.info(f"识别主题: {await self._identify_topics(text)}")
+ logger.info(f"[激活] 识别主题: {await self._identify_topics(text)}")
# 识别主题
identified_topics = await self._identify_topics(text)
@@ -777,7 +786,7 @@ class Hippocampus:
all_similar_topics = self._find_similar_topics(
identified_topics,
similarity_threshold=similarity_threshold,
- debug_info="记忆激活"
+ debug_info="激活"
)
if not all_similar_topics:
@@ -798,7 +807,7 @@ class Hippocampus:
activation = int(score * 50 * penalty)
logger.info(
- f"[记忆激活]单主题「{topic}」- 相似度: {score:.3f}, 内容数: {content_count}, 激活值: {activation}")
+ f"[激活] 单主题「{topic}」- 相似度: {score:.3f}, 内容数: {content_count}, 激活值: {activation}")
return activation
# 计算关键词匹配率,同时考虑内容数量
@@ -825,8 +834,8 @@ class Hippocampus:
matched_topics.add(input_topic)
adjusted_sim = sim * penalty
topic_similarities[input_topic] = max(topic_similarities.get(input_topic, 0), adjusted_sim)
- logger.info(
- f"[记忆激活]主题「{input_topic}」-> 「{memory_topic}」(内容数: {content_count}, 相似度: {adjusted_sim:.3f})")
+ logger.debug(
+ f"[激活] 主题「{input_topic}」-> 「{memory_topic}」(内容数: {content_count}, 相似度: {adjusted_sim:.3f})")
# 计算主题匹配率和平均相似度
topic_match = len(matched_topics) / len(identified_topics)
@@ -835,7 +844,7 @@ class Hippocampus:
# 计算最终激活值
activation = int((topic_match + average_similarities) / 2 * 100)
logger.info(
- f"[记忆激活]匹配率: {topic_match:.3f}, 平均相似度: {average_similarities:.3f}, 激活值: {activation}")
+ f"[激活] 匹配率: {topic_match:.3f}, 平均相似度: {average_similarities:.3f}, 激活值: {activation}")
return activation
diff --git a/src/plugins/utils/logger_config.py b/src/plugins/utils/logger_config.py
new file mode 100644
index 000000000..cc15d53a4
--- /dev/null
+++ b/src/plugins/utils/logger_config.py
@@ -0,0 +1,71 @@
+import sys
+from loguru import logger
+from enum import Enum
+
+class LogModule(Enum):
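+    """日志所属模块,用于选择对应的日志格式与输出目标"""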
+ BASE = "base"
+ MEMORY = "memory"
+ EMOJI = "emoji"
+ CHAT = "chat"
+
+def setup_logger(log_type: LogModule = LogModule.BASE):
+ """配置日志格式
+
+ Args:
+        log_type: 日志类型,可选值:BASE(基础日志)、CHAT(聊天日志)、MEMORY(记忆系统日志)、EMOJI(表情包系统日志)
+ """
+ # 移除默认的处理器
+ logger.remove()
+
+ # 基础日志格式
+ base_format = "{time:HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}"
+
+ chat_format = "{time:HH:mm:ss} | {level: <8} | {name}:{function}:{line} - {message}"
+
+ # 记忆系统日志格式
+ memory_format = "{time:HH:mm} | {level: <8} | 海马体 | {message}"
+
+ # 表情包系统日志格式
+ emoji_format = "{time:HH:mm} | {level: <8} | 表情包 | {function}:{line} - {message}"
+
+    # 根据日志类型选择日志格式和输出
+ if log_type == LogModule.CHAT:
+ logger.add(
+ sys.stderr,
+ format=chat_format,
+ )
+ elif log_type == LogModule.MEMORY:
+ # 同时输出到控制台和文件
+ logger.add(
+ sys.stderr,
+ format=memory_format,
+ )
+ logger.add(
+ "logs/memory.log",
+ format=memory_format,
+ level="INFO",
+ rotation="1 day",
+ retention="7 days"
+ )
+ elif log_type == LogModule.EMOJI:
+ logger.add(
+ sys.stderr,
+ format=emoji_format,
+ )
+ logger.add(
+ "logs/emoji.log",
+ format=emoji_format,
+ level="INFO",
+ rotation="1 day",
+ retention="7 days"
+ )
+ else: # BASE
+ logger.add(
+ sys.stderr,
+ format=base_format,
+ level="INFO"
+ )
+
+ return logger
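+
+
+# 使用示例(仅作示意,假设各业务模块在导入时调用一次):
+#     from src.plugins.utils.logger_config import setup_logger, LogModule
+#     logger = setup_logger(LogModule.MEMORY)
+#     logger.info("记忆系统已就绪")
+#
+# 注意:loguru 的 logger 是全局单例,setup_logger 中的 logger.remove() 会清除
+# 此前注册的所有处理器;多个模块先后调用时,实际生效的是最后一次调用的配置。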