diff --git a/Dockerfile b/Dockerfile index 9e1cf16b3..9f12476a6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,7 +7,7 @@ RUN mv env.example config/.env \ && mv src/plugins/chat/bot_config_toml config/bot_config.toml RUN ln -s /MaiMBot/config/.env /MaiMBot/.env \ && ln -s /MaiMBot/config/bot_config.toml /MaiMBot/src/plugins/chat/bot_config.toml -RUN pip install -r requirements.txt +RUN pip install --upgrade -r requirements.txt VOLUME [ "/MaiMBot/config" ] EXPOSE 8080 ENTRYPOINT [ "nb","run" ] \ No newline at end of file diff --git a/README.md b/README.md index d0a41209c..2366fe87b 100644 --- a/README.md +++ b/README.md @@ -52,11 +52,9 @@ NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart #### 手动运行 1. **创建Python环境** - 推荐使用conda或其他环境管理来管理你的python环境 + 推荐使用conda或其他虚拟环境进行依赖安装,防止出现依赖版本冲突问题 ```bash - # 安装requirements(还没检查好,可能有包漏了) - conda activate 你的环境 - cd 对应路径 + # 安装requirements pip install -r requirements.txt ``` 2. **MongoDB设置** @@ -68,8 +66,8 @@ NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart - 在Napcat的网络设置中添加ws反向代理:ws://localhost:8080/onebot/v11/ws 4. **配置文件设置** - - 把env.example改成.env,并填上你的apikey(硅基流动或deepseekapi) - - 把bot_config_toml改名为bot_config.toml,并填写相关内容,不然无法正常运行 + - 将.env文件打开,填上你的apikey(硅基流动或deepseekapi) + - 将bot_config.toml文件打开,并填写相关内容,不然无法正常运行 #### .env 文件配置说明 ```ini @@ -92,14 +90,10 @@ NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart MONGODB_PASSWORD="" # MongoDB密码(可选) MONGODB_AUTH_SOURCE="" # MongoDB认证源(可选) - # API密钥配置 - CHAT_ANY_WHERE_KEY= # ChatAnyWhere API密钥 - SILICONFLOW_KEY= # 硅基流动 API密钥(必填) - DEEP_SEEK_KEY= # DeepSeek API密钥(必填) - - # API地址配置 - CHAT_ANY_WHERE_BASE_URL=https://api.chatanywhere.tech/v1 + #api配置项,建议siliconflow必填,识图需要这个 + SILICONFLOW_KEY= SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/ + DEEP_SEEK_KEY= DEEP_SEEK_BASE_URL=https://api.deepseek.com/v1 ``` @@ -158,9 +152,8 @@ NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart ``` 5. 
**运行麦麦** + 在含有bot.py程序的目录下运行(如果使用了虚拟环境需要先进入虚拟环境) ```bash - conda activate 你的环境 - cd 对应路径 nb run ``` 6. **运行其他组件** @@ -205,3 +198,13 @@ NAPCAT_UID=$(id -u) NAPCAT_GID=$(id -g) docker compose restart 纯编程外行,面向cursor编程,很多代码史一样多多包涵 > ⚠️ **警告**:本应用生成内容来自人工智能模型,由 AI 生成,请仔细甄别,请勿用于违反法律的用途,AI生成内容不代表本人观点和立场。 + +## 致谢 +[nonebot2](https://github.com/nonebot/nonebot2): 跨平台 Python 异步聊天机器人框架 +[NapCat](https://github.com/NapNeko/NapCatQQ): 现代化的基于 NTQQ 的 Bot 协议端实现 + +### 贡献者 + +感谢各位大佬! + +[![Contributors](https://contributors-img.web.app/image?repo=SengokuCola/MaiMBot)](https://github.com/SengokuCola/MaiMBot/graphs/contributors) diff --git a/src/plugins/chat/bot_config_toml b/bot_config.toml similarity index 68% rename from src/plugins/chat/bot_config_toml rename to bot_config.toml index fe6b702d8..6730f0481 100644 --- a/src/plugins/chat/bot_config_toml +++ b/bot_config.toml @@ -7,8 +7,8 @@ password = "" # 默认空值 auth_source = "" # 默认空值 [bot] -qq = #填入你的机器人QQ -nickname = "麦麦" +qq = 123456 #填入你的机器人QQ +nickname = "麦麦" #你希望bot被称呼的名字 [message] min_text_length = 2 # 与麦麦聊天时麦麦只会回答文本大于等于此数的消息 @@ -24,7 +24,7 @@ enable_pic_translate = false [response] -api_using = "siliconflow" # 选择大模型API +api_using = "siliconflow" # 选择大模型API,可选值为siliconflow,deepseek,建议使用siliconflow,因为识图api目前只支持siliconflow的deepseek-vl2模型 model_r1_probability = 0.8 # 麦麦回答时选择R1模型的概率 model_v3_probability = 0.1 # 麦麦回答时选择V3模型的概率 model_r1_distill_probability = 0.1 # 麦麦回答时选择R1蒸馏模型的概率 @@ -36,13 +36,13 @@ enable_advance_output = true # 开启后输出更多日志,false关闭true开启 [groups] talk_allowed = [ - #可以回复消息的群 -] + 123456,12345678 +] #可以回复消息的群 talk_frequency_down = [ - #降低回复频率的群 -] + 123456,12345678 +] #降低回复频率的群 ban_user_id = [ - #禁止回复消息的QQ号 -] + 123456,12345678 +] #禁止回复消息的QQ号 diff --git a/docker-compose.yml b/docker-compose.yml index a159c26f3..5cbe87327 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,7 +32,7 @@ services: environment: - tz=Asia/Shanghai expose: - - "8080:8080" + - "8080" restart: always depends_on: - mongodb diff 
--git a/env.example b/env.example index c8ed650d6..9988d58f3 100644 --- a/env.example +++ b/env.example @@ -15,10 +15,8 @@ MONGODB_USERNAME = "" # 默认空值 MONGODB_PASSWORD = "" # 默认空值 MONGODB_AUTH_SOURCE = "" # 默认空值 -#key and url -CHAT_ANY_WHERE_KEY= +#api配置项 SILICONFLOW_KEY= -CHAT_ANY_WHERE_BASE_URL=https://api.chatanywhere.tech/v1 SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1/ DEEP_SEEK_KEY= DEEP_SEEK_BASE_URL=https://api.deepseek.com/v1 diff --git a/requirements.txt b/requirements.txt index 7ddb691bc..3f0fe05dd 100644 Binary files a/requirements.txt and b/requirements.txt differ diff --git a/src/plugins/chat/config.py b/src/plugins/chat/config.py index fa34f2a74..69e59ed5b 100644 --- a/src/plugins/chat/config.py +++ b/src/plugins/chat/config.py @@ -108,7 +108,7 @@ class BotConfig: return config -global_config = BotConfig.load_config("./src/plugins/chat/bot_config.toml") +global_config = BotConfig.load_config("./bot_config.toml") from dotenv import load_dotenv current_dir = os.path.dirname(os.path.abspath(__file__)) diff --git a/src/plugins/schedule/schedule_generator.py b/src/plugins/schedule/schedule_generator.py index f342d119a..d1778d878 100644 --- a/src/plugins/schedule/schedule_generator.py +++ b/src/plugins/schedule/schedule_generator.py @@ -29,7 +29,10 @@ Database.initialize( class ScheduleGenerator: def __init__(self): - self.llm_scheduler = LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3") + if global_config.API_USING == "siliconflow": + self.llm_scheduler = LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3") + elif global_config.API_USING == "deepseek": + self.llm_scheduler = LLMModel(model_name="deepseek-chat",api_using="deepseek") self.db = Database.get_instance() today = datetime.datetime.now() diff --git a/src/plugins/schedule/schedule_llm_module.py b/src/plugins/schedule/schedule_llm_module.py index 0f1e71f6c..13945afb3 100644 --- a/src/plugins/schedule/schedule_llm_module.py +++ b/src/plugins/schedule/schedule_llm_module.py @@ -8,11 +8,19 @@
load_dotenv() class LLMModel: # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs): - def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-R1", **kwargs): - self.model_name = model_name + def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-R1",api_using=None, **kwargs): + if api_using == "deepseek": + self.api_key = os.getenv("DEEP_SEEK_KEY") + self.base_url = os.getenv("DEEP_SEEK_BASE_URL") + if model_name != "Pro/deepseek-ai/DeepSeek-R1": + self.model_name = model_name + else: + self.model_name = "deepseek-reasoner" + else: + self.api_key = os.getenv("SILICONFLOW_KEY") + self.base_url = os.getenv("SILICONFLOW_BASE_URL") + self.model_name = model_name self.params = kwargs - self.api_key = os.getenv("SILICONFLOW_KEY") - self.base_url = os.getenv("SILICONFLOW_BASE_URL") def generate_response(self, prompt: str) -> Tuple[str, str]: """根据输入的提示生成模型的响应"""