feat: Add LPMM knowledge base config class and related fields

墨梓柒
2025-06-07 14:09:55 +08:00
parent ab2b7ec9ed
commit ca66542718
3 changed files with 77 additions and 1 deletion

View File

@@ -32,6 +32,7 @@ from src.config.official_configs import (
    FocusChatProcessorConfig,
    MessageReceiveConfig,
    MaimMessageConfig,
    LPMMKnowledgeConfig,
    RelationshipConfig,
)
@@ -161,6 +162,7 @@ class Config(ConfigBase):
    experimental: ExperimentalConfig
    model: ModelConfig
    maim_message: MaimMessageConfig
    lpmm_knowledge: LPMMKnowledgeConfig


def load_config(config_path: str) -> Config:
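With the new lpmm_knowledge field registered on Config, downstream code can read the section from the loaded config object. A minimal usage sketch (the config path is illustrative only, and it assumes load_config populates every section dataclass as the template below suggests):

# Illustrative only: load the config and read the new LPMM section.
config = load_config("config/bot_config.toml")  # example path, not taken from this commit

if config.lpmm_knowledge.enable:
    top_k = config.lpmm_knowledge.qa_res_top_k  # 10 by dataclass default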

View File

@@ -414,6 +414,44 @@ class MaimMessageConfig(ConfigBase):
"""认证令牌用于API验证为空则不启用验证"""
@dataclass
class LPMMKnowledgeConfig(ConfigBase):
"""LPMM知识库配置类"""
enable: bool = True
"""是否启用LPMM知识库"""
rag_synonym_search_top_k: int = 10
"""RAG同义词搜索的Top K数量"""
rag_synonym_threshold: float = 0.8
"""RAG同义词搜索的相似度阈值"""
info_extraction_workers: int = 3
"""信息提取工作线程数"""
qa_relation_search_top_k: int = 10
"""QA关系搜索的Top K数量"""
qa_relation_threshold: float = 0.75
"""QA关系搜索的相似度阈值"""
qa_paragraph_search_top_k: int = 1000
"""QA段落搜索的Top K数量"""
qa_paragraph_node_weight: float = 0.05
"""QA段落节点权重"""
qa_ent_filter_top_k: int = 10
"""QA实体过滤的Top K数量"""
qa_ppr_damping: float = 0.8
"""QA PageRank阻尼系数"""
qa_res_top_k: int = 10
"""QA最终结果的Top K数量"""
@dataclass
class ModelConfig(ConfigBase):
"""模型配置类"""

View File

@@ -1,5 +1,5 @@
[inner]
version = "2.14.0"
version = "2.15.0"
#----The notes below are for developers; if you have only deployed 麦麦, you do not need to read them----
#If you modify this config file, please bump the version value after your changes
@@ -137,6 +137,18 @@ mood_update_interval = 1.0 # Mood update interval, in seconds
mood_decay_rate = 0.95 # Mood decay rate
mood_intensity_factor = 1.0 # Mood intensity factor
[lpmm_knowledge] # LPMM knowledge base settings
enable = true # Whether to enable the LPMM knowledge base
rag_synonym_search_top_k = 10 # Top K for synonym search
rag_synonym_threshold = 0.8 # Synonym threshold (words with similarity above this value are treated as synonyms)
info_extraction_workers = 3 # Number of concurrent entity-extraction threads; do not set above 5 for non-Pro models
qa_relation_search_top_k = 10 # Top K for relation search
qa_relation_threshold = 0.5 # Relation threshold (relations with similarity above this value are considered relevant)
qa_paragraph_search_top_k = 1000 # Top K for paragraph search; setting this too small may hurt search results
qa_paragraph_node_weight = 0.05 # Paragraph node weight (weight in graph search & PPR computation; has no effect when search uses DPR only)
qa_ent_filter_top_k = 10 # Top K for entity filtering
qa_ppr_damping = 0.8 # PPR damping factor
qa_res_top_k = 3 # Top K of passages finally returned
# keyword_rules is used to configure extra reply knowledge triggered by keywords
# To add a new rule, append an entry to the keyword_rules array in the following format (see the sketch after this section):
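A minimal sketch of how the new [lpmm_knowledge] table maps onto the LPMMKnowledgeConfig dataclass above (illustrative only: it uses tomllib rather than the project's real load_config, the file path is an example, and it assumes the table keys are passed straight to the dataclass, which is how the field names line up):

import tomllib  # Python 3.11+ standard library

from src.config.official_configs import LPMMKnowledgeConfig

with open("bot_config.toml", "rb") as f:  # example path, not taken from this commit
    raw = tomllib.load(f)

# Every key in [lpmm_knowledge] matches a LPMMKnowledgeConfig field by name.
lpmm = LPMMKnowledgeConfig(**raw["lpmm_knowledge"])
print(lpmm.qa_res_top_k)  # 3 with the template values above, 10 by dataclass default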
@@ -273,7 +285,30 @@ temp = 0.7
enable_thinking = false # Whether to enable thinking (qwen3 only)

#------------LPMM knowledge base models------------
[model.lpmm_entity_extract] # Entity extraction model
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.2

[model.lpmm_rdf_build] # RDF construction model
name = "Pro/deepseek-ai/DeepSeek-V3"
provider = "SILICONFLOW"
pri_in = 2
pri_out = 8
temp = 0.2

[model.lpmm_qa] # Question answering model
name = "Pro/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
provider = "SILICONFLOW"
pri_in = 4.0
pri_out = 16.0
temp = 0.7

[maim_message]
@@ -296,3 +331,4 @@ enable_friend_chat = false # Whether to enable friend chat