Merge pull request #18 from HYY1116/main
chore: move the toml config file, add a default config file, update the docs
@@ -1,48 +0,0 @@
-[database]
-host = "127.0.0.1"
-port = 27017
-name = "MegBot"
-username = "" # empty by default
-password = "" # empty by default
-auth_source = "" # empty by default
-
-[bot]
-qq = # fill in your bot's QQ account here
-nickname = "麦麦"
-
-[message]
-min_text_length = 2 # 麦麦 only replies when a message's text length is at least this value
-max_context_size = 15 # number of context messages 麦麦 keeps; older ones are dropped automatically
-emoji_chance = 0.2 # probability that 麦麦 replies with a sticker
-
-[emoji]
-check_interval = 120
-register_interval = 10
-
-[cq_code]
-enable_pic_translate = false
-
-
-[response]
-api_using = "siliconflow" # which LLM API to use
-model_r1_probability = 0.8 # probability that 麦麦 answers with the R1 model
-model_v3_probability = 0.1 # probability that 麦麦 answers with the V3 model
-model_r1_distill_probability = 0.1 # probability that 麦麦 answers with the distilled R1 model
-
-[others]
-enable_advance_output = true # verbose logging: true to enable, false to disable
-
-
-[groups]
-
-talk_allowed = [
-    # groups 麦麦 may reply in
-]
-
-talk_frequency_down = [
-    # groups with reduced reply frequency
-]
-
-ban_user_id = [
-    # QQ accounts 麦麦 must never reply to
-]
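The deleted file above is the old default config. Note that qq = is deliberately left without a value, so the file will not parse until the user fills it in. For reference only (not code from this repo), a minimal loader sketch, assuming Python 3.11+ for the stdlib tomllib module; load_bot_config and its path argument are illustrative names:

    import tomllib  # stdlib TOML parser, Python 3.11+

    def load_bot_config(path: str = "bot_config.toml") -> dict:
        # tomllib only accepts files opened in binary mode
        with open(path, "rb") as f:
            config = tomllib.load(f)
        # fail fast if required fields were left at their defaults
        if not config.get("bot", {}).get("qq"):
            raise ValueError("bot.qq is not set; fill in the bot's QQ account")
        return config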
@@ -108,7 +108,7 @@ class BotConfig:
 
         return config
 
-global_config = BotConfig.load_config("./src/plugins/chat/bot_config.toml")
+global_config = BotConfig.load_config(".bot_config.toml")
 
 from dotenv import load_dotenv
 current_dir = os.path.dirname(os.path.abspath(__file__))
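Note on the hunk above: the new ".bot_config.toml" is a path relative to the process's working directory, so the bot must be started from the directory containing the file. A sketch of an alternative that anchors the lookup to the module itself (an illustration, not what this PR does; placing the file beside the module is an assumption):

    import os

    # resolve the config next to this source file instead of the
    # current working directory, so the launch location is irrelevant
    _HERE = os.path.dirname(os.path.abspath(__file__))
    global_config = BotConfig.load_config(os.path.join(_HERE, "bot_config.toml"))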
@@ -29,7 +29,10 @@ Database.initialize(
 
 class ScheduleGenerator:
     def __init__(self):
-        self.llm_scheduler = LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3")
+        if global_config.API_USING == "siliconflow":
+            self.llm_scheduler = LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3")
+        elif global_config.API_USING == "deepseek":
+            self.llm_scheduler = LLMModel(model_name="deepseek-chat", api_using="deepseek")
         self.db = Database.get_instance()
 
         today = datetime.datetime.now()
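One behavioral change in this hunk: the old code always constructed a DeepSeek-V3 scheduler model, while the new code only does so for the two recognized API_USING values, so an unrecognized value leaves self.llm_scheduler unset. A small factory is one way to keep the mapping in one place with an explicit fallback (a hypothetical helper, not part of this PR):

    # hypothetical helper: centralizes the API_USING -> model mapping
    def make_scheduler_model(api_using: str) -> LLMModel:
        if api_using == "deepseek":
            return LLMModel(model_name="deepseek-chat", api_using="deepseek")
        # anything else falls back to siliconflow, the config default
        return LLMModel(model_name="Pro/deepseek-ai/DeepSeek-V3")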
@@ -8,11 +8,19 @@ load_dotenv()
 
 class LLMModel:
     # def __init__(self, model_name="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", **kwargs):
-    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-R1", **kwargs):
-        self.model_name = model_name
+    def __init__(self, model_name="Pro/deepseek-ai/DeepSeek-R1", api_using=None, **kwargs):
+        if api_using == "deepseek":
+            self.api_key = os.getenv("DEEPSEEK_API_KEY")
+            self.base_url = os.getenv("DEEPSEEK_BASE_URL")
+            if model_name != "Pro/deepseek-ai/DeepSeek-R1":
+                self.model_name = model_name
+            else:
+                self.model_name = "deepseek-reasoner"
+        else:
+            self.api_key = os.getenv("SILICONFLOW_KEY")
+            self.base_url = os.getenv("SILICONFLOW_BASE_URL")
+            self.model_name = model_name
         self.params = kwargs
-        self.api_key = os.getenv("SILICONFLOW_KEY")
-        self.base_url = os.getenv("SILICONFLOW_BASE_URL")
 
     def generate_response(self, prompt: str) -> Tuple[str, str]:
         """Generate a model response for the given prompt."""
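With this constructor the provider is chosen per instance: api_using="deepseek" reads DEEPSEEK_API_KEY and DEEPSEEK_BASE_URL and maps the default model name to "deepseek-reasoner", while any other value reads SILICONFLOW_KEY and SILICONFLOW_BASE_URL and keeps the given model name. A usage sketch, assuming a .env file supplies the four variables (values below are placeholders):

    # .env (placeholder values)
    #   DEEPSEEK_API_KEY=sk-...
    #   DEEPSEEK_BASE_URL=https://api.deepseek.com
    #   SILICONFLOW_KEY=sk-...
    #   SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1
    from dotenv import load_dotenv
    load_dotenv()

    r1 = LLMModel()  # siliconflow branch, keeps "Pro/deepseek-ai/DeepSeek-R1"
    chat = LLMModel(model_name="deepseek-chat", api_using="deepseek")
    reasoner = LLMModel(api_using="deepseek")  # default name becomes "deepseek-reasoner"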