SengokuCola
2025-04-14 21:36:25 +08:00
16 changed files with 56 additions and 73 deletions

View File

@@ -108,7 +108,7 @@
 - [📚 Core Wiki Documentation](https://docs.mai-mai.org) - the project's most complete documentation hub, covering everything about 麦麦
 ### Latest Deployment Guide (MaiCore version)
-- [🚀 Latest Deployment Guide](https://docs.mai-mai.org/manual/deployment/mmc_deploy.html) - the new MaiCore-based deployment is not compatible with older versions
+- [🚀 Latest Deployment Guide](https://docs.mai-mai.org/manual/deployment/mmc_deploy_windows.html) - the new MaiCore-based deployment is not compatible with older versions
 ## 🎯 Features

View File

@@ -22,7 +22,7 @@
 ## [0.0.11] - 2025-3-12
 ### Added
 - Added the `schedule` config section, used to configure schedule generation
-- Added the `response_spliter` config section, used to control reply splitting
+- Added the `response_splitter` config section, used to control reply splitting
 - Added the `experimental` config section, used to toggle experimental features
 - Added the `llm_observation` and `llm_sub_heartflow` model configs
 - Added the `llm_heartflow` model config

View File

@@ -1,4 +1,4 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
 from src.plugins.config.config import global_config
 from src.common.logger import get_module_logger
 from src.plugins.moods.moods import MoodManager
@@ -18,12 +18,11 @@ class ChangeMoodTool(BaseTool):
"type": "object", "type": "object",
"properties": { "properties": {
"text": {"type": "string", "description": "引起你改变心情的文本"}, "text": {"type": "string", "description": "引起你改变心情的文本"},
"response_set": {"type": "list", "description": "你对文本的回复"} "response_set": {"type": "list", "description": "你对文本的回复"},
}, },
"required": ["text", "response_set"], "required": ["text", "response_set"],
} }
async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]: async def execute(self, function_args: Dict[str, Any], message_txt: str) -> Dict[str, Any]:
"""执行心情改变 """执行心情改变

View File

@@ -1,33 +1,24 @@
-from src.plugins.person_info.relationship_manager import relationship_manager
+# from src.plugins.person_info.relationship_manager import relationship_manager
 from src.common.logger import get_module_logger
 from src.do_tool.tool_can_use.base_tool import BaseTool
-from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
+# from src.plugins.chat_module.think_flow_chat.think_flow_generator import ResponseGenerator
 logger = get_module_logger("relationship_tool")
 class RelationshipTool(BaseTool):
     name = "change_relationship"
     description = "根据收到的文本和回复内容,修改与特定用户的关系值,当你回复了别人的消息,你可以使用这个工具"
     parameters = {
         "type": "object",
         "properties": {
-            "text": {
-                "type": "string",
-                "description": "收到的文本"
-            },
-            "changed_value": {
-                "type": "number",
-                "description": "变更值"
-            },
-            "reason": {
-                "type": "string",
-                "description": "变更原因"
-            }
-        },
-        "required": ["text", "changed_value", "reason"]
-    }
+            "text": {"type": "string", "description": "收到的文本"},
+            "changed_value": {"type": "number", "description": "变更值"},
+            "reason": {"type": "string", "description": "变更原因"},
+        },
+        "required": ["text", "changed_value", "reason"],
+    }
     async def execute(self, args: dict, message_txt: str) -> dict:
         """Execute the tool

View File

@@ -1,4 +1,4 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
 from src.common.logger import get_module_logger
 from typing import Dict, Any

View File

@@ -1,4 +1,4 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
 from src.plugins.chat.utils import get_embedding
 from src.common.database import db
 from src.common.logger import get_module_logger

View File

@@ -1,4 +1,4 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool, register_tool
+from src.do_tool.tool_can_use.base_tool import BaseTool
 from src.plugins.memory_system.Hippocampus import HippocampusManager
 from src.common.logger import get_module_logger
 from typing import Dict, Any

View File

@@ -178,10 +178,7 @@ class ToolUser:
         # If there are tool results, return the structured information
         if structured_info:
             logger.info(f"工具调用收集到结构化信息: {json.dumps(structured_info, ensure_ascii=False)}")
-            return {
-                "used_tools": True,
-                "structured_info": structured_info
-            }
+            return {"used_tools": True, "structured_info": structured_info}
         else:
             # No tool call was made
             content, reasoning_content = response
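The compacted return above gives the caller a small, fixed shape: a `used_tools` flag plus the collected `structured_info`. A minimal sketch of how a caller might branch on it; the entry-point name `use_tools` is an assumption for illustration, only the return shape comes from this diff:

```python
# Sketch only: consuming ToolUser's structured result.
from typing import Any, Dict, Optional


async def handle_tool_result(tool_user, message_txt: str) -> Optional[Dict[str, Any]]:
    result = await tool_user.use_tools(message_txt)  # hypothetical entry point
    if result and result.get("used_tools"):
        # Structured tool output is available for the reply-generation prompt.
        return result["structured_info"]
    # No tool call was made; fall back to the plain LLM response path.
    return None
```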

View File

@@ -98,8 +98,6 @@ class SubHeartflow:
         self.bot_name = global_config.BOT_NICKNAME
     def add_observation(self, observation: Observation):
         """Add a new observation object to the list; do not add it if an observation with the same id already exists"""
         # Check whether an observation with the same id already exists

View File

@@ -341,7 +341,7 @@ def process_llm_response(text: str) -> List[str]:
         tone_error_rate=global_config.chinese_typo_tone_error_rate,
         word_replace_rate=global_config.chinese_typo_word_replace_rate,
     )
-    if global_config.enable_response_spliter:
+    if global_config.enable_response_splitter:
         split_sentences = split_into_sentences_w_remove_punctuation(text)
     else:
         split_sentences = [text]

View File

@@ -136,8 +136,6 @@ class ThinkFlowChat:
         message_manager.add_message(bot_message)
     async def _update_relationship(self, message: MessageRecv, response_set):
         """Update the relationship and mood"""
         ori_response = ",".join(response_set)
@@ -382,7 +380,6 @@ class ThinkFlowChat:
         except Exception as e:
             logger.error(f"心流思考后脑内状态更新失败: {e}")
         # Post-reply handling
         await willing_manager.after_generate_reply_handle(message.message_info.message_id)

View File

@@ -226,7 +226,6 @@ class ResponseGenerator:
logger.debug(f"获取情感标签时出错: {e}") logger.debug(f"获取情感标签时出错: {e}")
return "中立", "平静" # 出错时返回默认值 return "中立", "平静" # 出错时返回默认值
async def _get_emotion_tags_with_reason(self, content: str, processed_plain_text: str, reason: str): async def _get_emotion_tags_with_reason(self, content: str, processed_plain_text: str, reason: str):
"""提取情感标签,结合立场和情绪""" """提取情感标签,结合立场和情绪"""
try: try:

View File

@@ -253,8 +253,8 @@ class BotConfig:
     chinese_typo_tone_error_rate = 0.2  # tone error probability
     chinese_typo_word_replace_rate = 0.02  # whole-word replacement probability
-    # response_spliter
-    enable_response_spliter = True  # whether to enable the reply splitter
+    # response_splitter
+    enable_response_splitter = True  # whether to enable the reply splitter
     response_max_length = 100  # maximum allowed reply length
     response_max_sentence_num = 3  # maximum allowed number of sentences per reply
@@ -604,13 +604,13 @@ class BotConfig:
"word_replace_rate", config.chinese_typo_word_replace_rate "word_replace_rate", config.chinese_typo_word_replace_rate
) )
def response_spliter(parent: dict): def response_splitter(parent: dict):
response_spliter_config = parent["response_spliter"] response_splitter_config = parent["response_splitter"]
config.enable_response_spliter = response_spliter_config.get( config.enable_response_splitter = response_splitter_config.get(
"enable_response_spliter", config.enable_response_spliter "enable_response_splitter", config.enable_response_splitter
) )
config.response_max_length = response_spliter_config.get("response_max_length", config.response_max_length) config.response_max_length = response_splitter_config.get("response_max_length", config.response_max_length)
config.response_max_sentence_num = response_spliter_config.get( config.response_max_sentence_num = response_splitter_config.get(
"response_max_sentence_num", config.response_max_sentence_num "response_max_sentence_num", config.response_max_sentence_num
) )
@@ -664,7 +664,7 @@ class BotConfig:
"keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False}, "keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
"chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False}, "chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
"platforms": {"func": platforms, "support": ">=1.0.0"}, "platforms": {"func": platforms, "support": ">=1.0.0"},
"response_spliter": {"func": response_spliter, "support": ">=0.0.11", "necessary": False}, "response_splitter": {"func": response_splitter, "support": ">=0.0.11", "necessary": False},
"experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False}, "experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
"heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False}, "heartflow": {"func": heartflow, "support": ">=1.0.2", "necessary": False},
} }

View File

@@ -162,7 +162,9 @@ class RelationshipManager:
         return chat_stream.user_info.user_nickname, value, relationship_level[level_num]
-    async def calculate_update_relationship_value_with_reason(self, chat_stream: ChatStream, label: str, stance: str, reason: str) -> tuple:
+    async def calculate_update_relationship_value_with_reason(
+        self, chat_stream: ChatStream, label: str, stance: str, reason: str
+    ) -> tuple:
         """Calculate and apply the relationship value change
         New calculation for relationship value changes:
         The relationship value is kept within the range -1000 to 1000
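The docstring above states that the relationship value is clamped to -1000..1000. A minimal sketch of that clamping step, assuming the delta has already been derived from the emotion label, stance, and reason (the actual scoring logic is not shown in this diff, and the names below are illustrative):

```python
# Sketch only: clamp an updated relationship value into [-1000, 1000].
def apply_relationship_delta(current_value: float, delta: float) -> float:
    new_value = current_value + delta
    return max(-1000.0, min(1000.0, new_value))


assert apply_relationship_delta(950.0, 100.0) == 1000.0  # capped at the upper bound
assert apply_relationship_delta(-990.0, -50.0) == -1000.0  # capped at the lower bound
```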

View File

@@ -29,7 +29,7 @@ SECTION_TRANSLATIONS = {
"mood": "情绪设置", "mood": "情绪设置",
"keywords_reaction": "关键词反应", "keywords_reaction": "关键词反应",
"chinese_typo": "中文错别字", "chinese_typo": "中文错别字",
"response_spliter": "回复分割器", "response_splitter": "回复分割器",
"remote": "远程设置", "remote": "远程设置",
"experimental": "实验功能", "experimental": "实验功能",
"model": "模型设置", "model": "模型设置",
@@ -116,9 +116,9 @@ CONFIG_DESCRIPTIONS = {
"chinese_typo.tone_error_rate": "声调错误概率", "chinese_typo.tone_error_rate": "声调错误概率",
"chinese_typo.word_replace_rate": "整词替换概率", "chinese_typo.word_replace_rate": "整词替换概率",
# 回复分割器 # 回复分割器
"response_spliter.enable_response_spliter": "是否启用回复分割器", "response_splitter.enable_response_splitter": "是否启用回复分割器",
"response_spliter.response_max_length": "回复允许的最大长度", "response_splitter.response_max_length": "回复允许的最大长度",
"response_spliter.response_max_sentence_num": "回复允许的最大句子数", "response_splitter.response_max_sentence_num": "回复允许的最大句子数",
# 远程设置 # 远程设置
"remote.enable": "是否启用远程统计", "remote.enable": "是否启用远程统计",
# 实验功能 # 实验功能

View File

@@ -1,5 +1,5 @@
 [inner]
-version = "1.2.6"
+version = "1.2.7"
 # The following is intended for developers; regular users do not need to read it
@@ -159,8 +159,8 @@ min_freq=9 # minimum character-frequency threshold
 tone_error_rate=0.1 # tone error probability
 word_replace_rate=0.006 # whole-word replacement probability
-[response_spliter]
-enable_response_spliter = true # whether to enable the reply splitter
+[response_splitter]
+enable_response_splitter = true # whether to enable the reply splitter
 response_max_length = 100 # maximum allowed reply length
 response_max_sentence_num = 4 # maximum allowed number of sentences per reply
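The renamed `[response_splitter]` section caps reply length and sentence count. A rough sketch of how such limits could be enforced after splitting; this is an assumption for illustration, not the project's `split_into_sentences_w_remove_punctuation` implementation or its actual truncation policy:

```python
# Sketch only: enforce response_splitter limits on a list of split sentences.
# The policy here (drop extra sentences, stop once the length budget is spent)
# is assumed; the real behavior lives in process_llm_response and its helpers.
from typing import List


def enforce_split_limits(sentences: List[str], max_length: int = 100, max_sentence_num: int = 4) -> List[str]:
    kept: List[str] = []
    total = 0
    for sentence in sentences[:max_sentence_num]:  # keep at most N sentences
        if total + len(sentence) > max_length:     # stop when the length budget is exhausted
            break
        kept.append(sentence)
        total += len(sentence)
    return kept


print(enforce_split_limits(["第一句", "第二句", "第三句", "第四句", "第五句"]))
# -> ['第一句', '第二句', '第三句', '第四句']
```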