feat: support correct parsing of replies and @ mentions
@@ -10,7 +10,7 @@ class ChatState(enum.Enum):

 class ChatStateInfo:
     def __init__(self):
-        self.chat_status: ChatState = ChatState.ABSENT
+        self.chat_status: ChatState = ChatState.CHAT
         self.current_state_time = 120

         self.mood_manager = MoodManager()
@@ -295,9 +295,6 @@ class SubHeartflow:
     def get_all_observations(self) -> list[Observation]:
         return self.observations

-    def clear_observations(self):
-        self.observations.clear()
-
     def _get_primary_observation(self) -> Optional[ChattingObservation]:
         if self.observations and isinstance(self.observations[0], ChattingObservation):
             return self.observations[0]
@@ -1,4 +1,5 @@
 import time
+import random
 from typing import List, Dict, Optional, Any, Tuple, Coroutine
 from src.common.logger_manager import get_logger
 from src.plugins.models.utils_model import LLMRequest
@@ -8,6 +9,9 @@ from src.plugins.heartFC_chat.heartflow_prompt_builder import Prompt, global_pro
 import os
 import json


+MAX_EXPRESSION_COUNT = 300
+
+
 logger = get_logger("expressor")

@@ -52,6 +56,18 @@ class ExpressionLearner:
             expressions: List[dict] = json.load(f)
         return expressions

+    def is_similar(self, s1: str, s2: str) -> bool:
+        """
+        Determine whether two strings are similar (only strings longer than 5 with over 80% positional overlap; substrings are not considered)
+        """
+        if not s1 or not s2:
+            return False
+        min_len = min(len(s1), len(s2))
+        if min_len < 5:
+            return False
+        same = sum(1 for a, b in zip(s1, s2) if a == b)
+        return same / min_len > 0.8
+
     async def learn_and_store_expression(self) -> List[Tuple[str, str, str]]:
         """Pick 10 random messages from within the last hour, then learn the expression styles used in those messages"""
         logger.info("开始学习表达方式...")
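The new similarity check compares the two strings position by position rather than looking for shared substrings, so it only fires on near-identical phrasings of comparable length. A minimal sketch of how it behaves (the function body mirrors the method added above; the sample strings are made up):

# Standalone copy of the added method, for illustration only.
def is_similar(s1: str, s2: str) -> bool:
    if not s1 or not s2:
        return False
    min_len = min(len(s1), len(s2))
    if min_len < 5:  # short strings never count as similar
        return False
    same = sum(1 for a, b in zip(s1, s2) if a == b)  # positional matches only
    return same / min_len > 0.8

print(is_similar("今天天气真好呀哈哈", "今天天气真好呀哈嘿"))  # True: 8 of 9 positions match
print(is_similar("今天天气真好", "明天天气不好"))              # False: only 4 of 6 positions match
print(is_similar("哈哈", "哈哈"))                              # False: shorter than 5 characters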
@@ -74,15 +90,40 @@ class ExpressionLearner:
         file_path = os.path.join(dir_path, "expressions.json")
         # If the file already exists, read it first and merge
         if os.path.exists(file_path):
-            old_data: List[Dict[str, str]] = []
+            old_data: List[Dict[str, str, str]] = []
             try:
                 with open(file_path, "r", encoding="utf-8") as f:
                     old_data = json.load(f)
             except Exception:
                 old_data = []
-            expr_list = old_data + expr_list
+        else:
+            old_data = []
+        # When over the maximum count, drop count == 1 items with a 20% chance
+        if len(old_data) >= MAX_EXPRESSION_COUNT:
+            delete = True
+            new_old_data = []
+            for item in old_data:
+                if item.get("count", 1) == 1 and random.random() < 0.2:
+                    continue  # dropped with 20% probability
+                new_old_data.append(item)
+            old_data = new_old_data
+        # Merge logic
+        for new_expr in expr_list:
+            found = False
+            for old_expr in old_data:
+                if self.is_similar(new_expr["situation"], old_expr.get("situation", "")) and self.is_similar(new_expr["style"], old_expr.get("style", "")):
+                    found = True
+                    # Replace with 50% probability
+                    if random.random() < 0.5:
+                        old_expr["situation"] = new_expr["situation"]
+                        old_expr["style"] = new_expr["style"]
+                    old_expr["count"] = old_expr.get("count", 1) + 1
+                    break
+            if not found:
+                new_expr["count"] = 1
+                old_data.append(new_expr)
         with open(file_path, "w", encoding="utf-8") as f:
-            json.dump(expr_list, f, ensure_ascii=False, indent=2)
+            json.dump(old_data, f, ensure_ascii=False, indent=2)
         return expressions

     async def learn_expression(self) -> Optional[List[Tuple[str, str, str]]]:
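After this change expressions.json holds one record per learned situation/style pair instead of an ever-growing list: a similar new expression bumps the existing record's count (and rewrites its wording with 50% probability), an unmatched one is appended with count = 1, and once the file reaches MAX_EXPRESSION_COUNT (300) entries, count == 1 records are dropped with a 20% chance on each learning pass. A sketch of the resulting record shape (the keys come from the diff above; the values are made up):

# Hypothetical entry in expressions.json after this commit; only the keys are real.
example_entry = {
    "situation": "someone asks several questions in one message",  # made-up sample
    "style": "answer the most important one first, briefly",       # made-up sample
    "count": 3,  # incremented each time a similar situation/style pair is merged in
}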
@@ -86,7 +86,7 @@ def init_prompt():
     {{
         "action": "reply",
         "text": "你想表达的内容",
-        "emojis": "表情关键词",
+        "emojis": "描述当前使用表情包的场景",
         "target": "你想要回复的原始文本内容(非必须,仅文本,不包含发送者)",
         "reasoning": "你的决策理由",
     }}
@@ -8,6 +8,7 @@ from src.config.config import global_config
 from typing import List, Dict, Any, Tuple  # make sure the type hints are imported
 import time  # import the time module to get the current time
 import random
+import re

 # import the new repository functions
 from src.common.message_repository import find_messages, count_messages
@@ -215,10 +216,43 @@ async def _build_readable_messages_internal(
         else:
             person_name = "某人"
+
+        # Check for a 回复<aaa:bbb> (reply) marker
+        reply_pattern = r"回复<([^:<>]+):([^:<>]+)>"
+        match = re.search(reply_pattern, content)
+        if match:
+            aaa = match.group(1)
+            bbb = match.group(2)
+            reply_person_id = person_info_manager.get_person_id(platform, bbb)
+            reply_person_name = await person_info_manager.get_value(reply_person_id, "person_name")
+            if not reply_person_name:
+                reply_person_name = aaa
+            # Add the reply info at the front of the content
+            content = re.sub(reply_pattern, f"回复 {reply_person_name}", content, count=1)
+
+        # Check for @<aaa:bbb> markers, i.e. @<{member_info.get('nickname')}:{member_info.get('user_id')}>
+        at_pattern = r"@<([^:<>]+):([^:<>]+)>"
+        at_matches = list(re.finditer(at_pattern, content))
+        if at_matches:
+            new_content = ""
+            last_end = 0
+            for m in at_matches:
+                new_content += content[last_end:m.start()]
+                aaa = m.group(1)
+                bbb = m.group(2)
+                at_person_id = person_info_manager.get_person_id(platform, bbb)
+                at_person_name = await person_info_manager.get_value(at_person_id, "person_name")
+                if not at_person_name:
+                    at_person_name = aaa
+                new_content += f"@{at_person_name}"
+                last_end = m.end()
+            new_content += content[last_end:]
+            content = new_content
+
         message_details_raw.append((timestamp, person_name, content))

     if not message_details_raw:
         return "", []


     message_details_raw.sort(key=lambda x: x[0])  # sort by timestamp (the first element) in ascending order, earliest messages first

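For reference, a minimal self-contained sketch of the rewriting this hunk performs on raw message text. The person_info_manager lookup is replaced here by a plain dict keyed on the platform user id, with the nickname embedded in the marker as the fallback, and both markers are rewritten via re.sub callbacks instead of the manual scan used for @ above; the names rewrite_markers and names_by_id are invented for this sketch, but the output matches what the new code produces.

import re

REPLY_PATTERN = r"回复<([^:<>]+):([^:<>]+)>"
AT_PATTERN = r"@<([^:<>]+):([^:<>]+)>"

def rewrite_markers(content: str, names_by_id: dict[str, str]) -> str:
    """Turn raw 回复<nick:uid> / @<nick:uid> markers into readable text."""
    def resolve(m: re.Match, prefix: str) -> str:
        nick, uid = m.group(1), m.group(2)
        # use the stored person_name if known, otherwise the nickname from the marker
        return f"{prefix}{names_by_id.get(uid, nick)}"

    content = re.sub(REPLY_PATTERN, lambda m: resolve(m, "回复 "), content, count=1)
    return re.sub(AT_PATTERN, lambda m: resolve(m, "@"), content)

print(rewrite_markers("回复<小明:10001> 收到 @<小红:10002>", {"10002": "阿红"}))
# -> 回复 小明 收到 @阿红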
@@ -66,11 +66,11 @@ time_zone = "Asia/Shanghai" # 给你的机器人设置时区,可以解决运
 nonebot-qq="http://127.0.0.1:18002/api/message"

 [chat] # 麦麦's general chat settings
-allow_focus_mode = true # whether to allow the focused-chat state
+allow_focus_mode = false # whether to allow the focused-chat state
 # whether to enable heart_flowC (HFC) mode
 # when enabled, 麦麦 will decide on its own to enter heart_flowC mode (for a period of time), proactively observing and giving replies; this consumes more tokens
-base_normal_chat_num = 8 # maximum number of groups allowed in normal chat
-base_focused_chat_num = 5 # maximum number of groups allowed in focused chat
+base_normal_chat_num = 999 # maximum number of groups allowed in normal chat
+base_focused_chat_num = 4 # maximum number of groups allowed in focused chat

 observation_context_size = 15 # maximum observed context length; 15 is recommended, too short or too long will make 麦麦 act silly
 message_buffer = true # enable the message buffer? Enable this to handle message splitting, but it delays 麦麦's replies