better: restructure config, separate expressions, distinguish chat modes

Restructured the config file paths and added more configuration options
Separated persona-defined expression styles from learned expression styles
Split the chat mode into normal, focus, and auto
This commit is contained in:
SengokuCola
2025-05-20 22:41:55 +08:00
parent 67569f1fa6
commit 25d9032e62
54 changed files with 387 additions and 482 deletions
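
Most of the hunks below repeat two mechanical changes: call sites stop fetching the singleton via Individuality.get_instance() and instead import the module-level individuality instance, and observation_context_size is read from the focus_chat config section instead of chat. A rough before/after sketch of a typical call site (only the import paths, attribute names, and method calls that appear in the hunks are taken from the commit; the surrounding lines are illustrative):

# Before: pattern removed across the hunks below
from src.individuality.individuality import Individuality
from src.config.config import global_config

individuality = Individuality.get_instance()                 # explicit singleton lookup at each call site
prompt_personality = individuality.get_prompt(x_person=2, level=2)
limit = global_config.chat.observation_context_size          # context size read from the chat section

# After: pattern added across the hunks below
from src.individuality.individuality import individuality    # module-level singleton, no get_instance()

prompt_personality = individuality.get_prompt(x_person=2, level=2)
limit = global_config.focus_chat.observation_context_size    # context size moved to the focus_chat section

The self-processor hunk additionally replaces the single get_prompt call with separate get_personality_prompt and get_identity_prompt calls.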

View File

@@ -17,7 +17,7 @@ from src.manager.mood_manager import mood_manager
from src.chat.heart_flow.utils_chat import get_chat_type_and_target_info
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
import time
@@ -281,7 +281,6 @@ class DefaultExpressor:
in_mind_reply,
target_message,
) -> str:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(x_person=0, level=2)
# Determine if it's a group chat
@@ -294,7 +293,7 @@ class DefaultExpressor:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.chat.observation_context_size,
limit=global_config.focus_chat.observation_context_size,
)
chat_talking_prompt = await build_readable_messages(
message_list_before_now,

View File

@@ -36,24 +36,6 @@ def init_prompt() -> None:
"""
Prompt(learn_style_prompt, "learn_style_prompt")
personality_expression_prompt = """
{personality}
请从以上人设中总结出这个角色可能的语言风格
思考回复的特殊内容和情感
思考有没有特殊的梗,一并总结成语言风格
总结成如下格式的规律,总结的内容要详细,但具有概括性:
"xxx"时,可以"xxx", xxx不超过10个字
例如:
"表示十分惊叹"时,使用"我嘞个xxxx"
"表示讽刺的赞同,不想讲道理"时,使用"对对对"
"想说明某个观点,但懒得明说",使用"懂的都懂"
现在请你概括
"""
Prompt(personality_expression_prompt, "personality_expression_prompt")
learn_grammar_prompt = """
{chat_str}
@@ -278,44 +260,6 @@ class ExpressionLearner:
expressions.append((chat_id, situation, style))
return expressions
async def extract_and_store_personality_expressions(self):
"""
检查data/expression/personality目录不存在则创建。
用peronality变量作为chat_str调用LLM生成表达风格解析后count=100存储到expressions.json。
"""
dir_path = os.path.join("data", "expression", "personality")
os.makedirs(dir_path, exist_ok=True)
file_path = os.path.join(dir_path, "expressions.json")
# 构建prompt
prompt = await global_prompt_manager.format_prompt(
"personality_expression_prompt",
personality=global_config.personality.expression_style,
)
# logger.info(f"个性表达方式提取prompt: {prompt}")
try:
response, _ = await self.express_learn_model.generate_response_async(prompt)
except Exception as e:
logger.error(f"个性表达方式提取失败: {e}")
return
logger.info(f"个性表达方式提取response: {response}")
# chat_id用personality
expressions = self.parse_expression_response(response, "personality")
# 转为dict并count=100
result = []
for _, situation, style in expressions:
result.append({"situation": situation, "style": style, "count": 100})
# 超过50条时随机删除多余的只保留50条
if len(result) > 50:
remove_count = len(result) - 50
remove_indices = set(random.sample(range(len(result)), remove_count))
result = [item for idx, item in enumerate(result) if idx not in remove_indices]
with open(file_path, "w", encoding="utf-8") as f:
json.dump(result, f, ensure_ascii=False, indent=2)
logger.info(f"已写入{len(result)}条表达到{file_path}")
init_prompt()

View File

@@ -3,7 +3,7 @@ import contextlib
import time
import traceback
from collections import deque
from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
from typing import List, Optional, Dict, Any, Deque
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.message_receive.chat_stream import chat_manager
from rich.traceback import install

View File

@@ -5,7 +5,6 @@ from ...config.config import global_config
from ..message_receive.message import MessageRecv
from ..message_receive.storage import MessageStorage
from ..utils.utils import is_mentioned_bot_in_message
from maim_message import Seg
from src.chat.heart_flow.heartflow import heartflow
from src.common.logger_manager import get_logger
from ..message_receive.chat_stream import chat_manager
@@ -79,26 +78,26 @@ async def _calculate_interest(message: MessageRecv) -> Tuple[float, bool]:
return interested_rate, is_mentioned
-def _get_message_type(message: MessageRecv) -> str:
-"""获取消息类型
+# def _get_message_type(message: MessageRecv) -> str:
+# """获取消息类型
-Args:
-message: 消息对象
+# Args:
+# message: 消息对象
-Returns:
-str: 消息类型
-"""
-if message.message_segment.type != "seglist":
-return message.message_segment.type
+# Returns:
+# str: 消息类型
+# """
+# if message.message_segment.type != "seglist":
+# return message.message_segment.type
-if (
-isinstance(message.message_segment.data, list)
-and all(isinstance(x, Seg) for x in message.message_segment.data)
-and len(message.message_segment.data) == 1
-):
-return message.message_segment.data[0].type
+# if (
+# isinstance(message.message_segment.data, list)
+# and all(isinstance(x, Seg) for x in message.message_segment.data)
+# and len(message.message_segment.data) == 1
+# ):
+# return message.message_segment.data[0].type
-return "seglist"
+# return "seglist"
def _check_ban_words(text: str, chat, userinfo) -> bool:
@@ -141,7 +140,7 @@ def _check_ban_regex(text: str, chat, userinfo) -> bool:
return False
class HeartFCProcessor:
class HeartFCMessageReceiver:
"""心流处理器,负责处理接收到的消息并计算兴趣度"""
def __init__(self):

View File

@@ -1,6 +1,6 @@
from src.config.config import global_config
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
from src.chat.person_info.relationship_manager import relationship_manager
@@ -103,7 +103,6 @@ class PromptBuilder:
return None
async def _build_prompt_normal(self, chat_stream, message_txt: str, sender_name: str = "某人") -> str:
individuality = Individuality.get_instance()
prompt_personality = individuality.get_prompt(x_person=2, level=2)
is_group_chat = bool(chat_stream.group_info)
@@ -112,7 +111,7 @@ class PromptBuilder:
who_chat_in_group = get_recent_group_speaker(
chat_stream.stream_id,
(chat_stream.user_info.platform, chat_stream.user_info.user_id) if chat_stream.user_info else None,
limit=global_config.chat.observation_context_size,
limit=global_config.focus_chat.observation_context_size,
)
elif chat_stream.user_info:
who_chat_in_group.append(
@@ -160,7 +159,7 @@ class PromptBuilder:
message_list_before_now = get_raw_msg_before_timestamp_with_chat(
chat_id=chat_stream.stream_id,
timestamp=time.time(),
limit=global_config.chat.observation_context_size,
limit=global_config.focus_chat.observation_context_size,
)
chat_talking_prompt = await build_readable_messages(
message_list_before_now,

View File

@@ -80,4 +80,4 @@ class ActionInfo(InfoBase):
Returns:
bool: 如果有任何动作需要添加或移除则返回True
"""
return bool(self.get_add_actions() or self.get_remove_actions())
return bool(self.get_add_actions() or self.get_remove_actions())

View File

@@ -1,14 +1,10 @@
from typing import List, Optional, Any
from src.chat.focus_chat.info.obs_info import ObsInfo
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.info.info_base import InfoBase
from src.chat.focus_chat.info.action_info import ActionInfo
from .base_processor import BaseProcessor
from src.common.logger_manager import get_logger
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.observation.hfcloop_observation import HFCloopObservation
from src.chat.focus_chat.info.cycle_info import CycleInfo
from datetime import datetime
from typing import Dict
from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
@@ -55,10 +51,7 @@ class ActionProcessor(BaseProcessor):
# 处理Observation对象
if observations:
for obs in observations:
if isinstance(obs, HFCloopObservation):
# 创建动作信息
action_info = ActionInfo()
action_changes = await self.analyze_loop_actions(obs)
@@ -75,7 +68,6 @@ class ActionProcessor(BaseProcessor):
return processed_infos
async def analyze_loop_actions(self, obs: HFCloopObservation) -> Dict[str, List[str]]:
"""分析最近的循环内容并决定动作的增减
@@ -87,29 +79,29 @@ class ActionProcessor(BaseProcessor):
}
"""
result = {"add": [], "remove": []}
# 获取最近10次循环
recent_cycles = obs.history_loop[-10:] if len(obs.history_loop) > 10 else obs.history_loop
if not recent_cycles:
return result
# 统计no_reply的数量
no_reply_count = 0
reply_sequence = [] # 记录最近的动作序列
for cycle in recent_cycles:
action_type = cycle.loop_plan_info["action_result"]["action_type"]
if action_type == "no_reply":
no_reply_count += 1
reply_sequence.append(action_type == "reply")
# 检查no_reply比例
if len(recent_cycles) >= 5 and (no_reply_count / len(recent_cycles)) >= 0.8:
result["add"].append("exit_focus_chat")
# 获取最近三次的reply状态
last_three = reply_sequence[-3:] if len(reply_sequence) >= 3 else reply_sequence
# 根据最近的reply情况决定是否移除reply动作
if len(last_three) >= 3 and all(last_three):
# 如果最近三次都是reply直接移除
@@ -122,5 +114,5 @@ class ActionProcessor(BaseProcessor):
# 如果最近一次是reply20%概率移除
if random.random() < 0.2:
result["remove"].append("reply")
return result

View File

@@ -5,7 +5,7 @@ from src.config.config import global_config
import time
import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.utils.json_utils import safe_json_dumps
from src.chat.message_receive.chat_stream import chat_manager
@@ -194,7 +194,6 @@ class MindProcessor(BaseProcessor):
# ---------- 3. 准备个性化数据 ----------
# 获取个性化信息
individuality = Individuality.get_instance()
relation_prompt = ""
for person in person_list:

View File

@@ -5,7 +5,7 @@ from src.config.config import global_config
import time
import traceback
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.chat.message_receive.chat_stream import chat_manager
from src.chat.person_info.relationship_manager import relationship_manager
@@ -21,8 +21,9 @@ logger = get_logger("processor")
def init_prompt():
indentify_prompt = """
你的名字是{bot_name},你是一个AI但是你伪装成了一个人类你的人格是{prompt_personality}
的头像形象是一只橙色的鱼,头上有绿色的树叶
{name_block}
是一个AI但是你伪装成了一个人类你的人格是{prompt_personality}
{indentify_block}
{relation_prompt}
{memory_str}
@@ -125,16 +126,22 @@ class SelfProcessor(BaseProcessor):
# hfcloop_observe_info = observation.get_observe_info()
pass
individuality = Individuality.get_instance()
personality_block = individuality.get_prompt(x_person=2, level=2)
nickname_str = ""
for nicknames in global_config.bot.alias_names:
nickname_str += f"{nicknames},"
name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
personality_block = individuality.get_personality_prompt(x_person=2, level=2)
identity_block = individuality.get_identity_prompt(x_person=2, level=2)
relation_prompt = ""
for person in person_list:
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
prompt = (await global_prompt_manager.get_prompt_async("indentify_prompt")).format(
bot_name=individuality.name,
name_block=name_block,
prompt_personality=personality_block,
indentify_block=identity_block,
memory_str=memory_str,
relation_prompt=relation_prompt,
time_now=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),

View File

@@ -3,7 +3,7 @@ from src.chat.models.utils_model import LLMRequest
from src.config.config import global_config
import time
from src.common.logger_manager import get_logger
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.tools.tool_use import ToolUser
from src.chat.utils.json_utils import process_llm_tool_calls
@@ -133,7 +133,7 @@ class ToolProcessor(BaseProcessor):
relation_prompt += await relationship_manager.build_relationship_info(person, is_id=True)
# 获取个性信息
individuality = Individuality.get_instance()
# prompt_personality = individuality.get_prompt(x_person=2, level=2)
# 获取时间信息

View File

@@ -1,9 +1,8 @@
from typing import Dict, List, Optional, Callable, Coroutine, Type, Any
from typing import Dict, List, Optional, Type, Any
from src.chat.focus_chat.planners.actions.base_action import BaseAction, _ACTION_REGISTRY
from src.chat.heart_flow.observation.observation import Observation
from src.chat.focus_chat.expressors.default_expressor import DefaultExpressor
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.focus_chat.heartFC_Cycleinfo import CycleDetail
from src.common.logger_manager import get_logger
import importlib
import pkgutil

View File

@@ -1,12 +1,9 @@
import asyncio
import traceback
from src.common.logger_manager import get_logger
from src.chat.utils.timer_calculator import Timer
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List, Callable, Coroutine
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.heart_flow.sub_heartflow import SubHeartFlow
from src.chat.message_receive.chat_stream import ChatStream
from src.chat.heart_flow.heartflow import heartflow
from src.chat.heart_flow.sub_heartflow import ChatState
@@ -61,8 +58,6 @@ class ExitFocusChatAction(BaseAction):
self._shutting_down = shutting_down
self.chat_id = chat_stream.stream_id
async def handle_action(self) -> Tuple[bool, str]:
"""
处理退出专注聊天的情况
@@ -95,7 +90,6 @@ class ExitFocusChatAction(BaseAction):
logger.warning(f"{self.log_prefix} {warning_msg}")
return False, warning_msg
return True, status_message
except asyncio.CancelledError:
@@ -105,4 +99,4 @@ class ExitFocusChatAction(BaseAction):
error_msg = f"处理 'exit_focus_chat' 时发生错误: {str(e)}"
logger.error(f"{self.log_prefix} {error_msg}")
logger.error(traceback.format_exc())
return False, error_msg
return False, error_msg

View File

@@ -3,7 +3,7 @@ import traceback
from src.common.logger_manager import get_logger
from src.chat.utils.timer_calculator import Timer
from src.chat.focus_chat.planners.actions.base_action import BaseAction, register_action
from typing import Tuple, List, Callable, Coroutine
from typing import Tuple, List
from src.chat.heart_flow.observation.observation import Observation
from src.chat.heart_flow.observation.chatting_observation import ChattingObservation
from src.chat.focus_chat.hfc_utils import parse_thinking_id_to_timestamp

View File

@@ -12,7 +12,7 @@ from src.chat.focus_chat.info.action_info import ActionInfo
from src.chat.focus_chat.info.structured_info import StructuredInfo
from src.common.logger_manager import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.individuality.individuality import Individuality
from src.individuality.individuality import individuality
from src.chat.focus_chat.planners.action_manager import ActionManager
logger = get_logger("planner")
@@ -92,37 +92,37 @@ class ActionPlanner:
try:
# 获取观察信息
extra_info: list[str] = []
# 首先处理动作变更
for info in all_plan_info:
if isinstance(info, ActionInfo) and info.has_changes():
add_actions = info.get_add_actions()
remove_actions = info.get_remove_actions()
reason = info.get_reason()
# 处理动作的增加
for action_name in add_actions:
if action_name in self.action_manager.get_registered_actions():
self.action_manager.add_action_to_using(action_name)
logger.debug(f"{self.log_prefix}添加动作: {action_name}, 原因: {reason}")
# 处理动作的移除
for action_name in remove_actions:
self.action_manager.remove_action_from_using(action_name)
logger.debug(f"{self.log_prefix}移除动作: {action_name}, 原因: {reason}")
# 如果当前选择的动作被移除了更新为no_reply
if action in remove_actions:
action = "no_reply"
reasoning = f"之前选择的动作{action}已被移除,原因: {reason}"
# 继续处理其他信息
for info in all_plan_info:
if isinstance(info, ObsInfo):
observed_messages = info.get_talking_message()
observed_messages_str = info.get_talking_message_str_truncate()
chat_type = info.get_chat_type()
is_group_chat = (chat_type == "group")
is_group_chat = chat_type == "group"
elif isinstance(info, MindInfo):
current_mind = info.get_current_mind()
elif isinstance(info, CycleInfo):
@@ -134,20 +134,16 @@ class ActionPlanner:
# 获取当前可用的动作
current_available_actions = self.action_manager.get_using_actions()
# 如果没有可用动作直接返回no_reply
if not current_available_actions:
logger.warning(f"{self.log_prefix}没有可用的动作将使用no_reply")
action = "no_reply"
reasoning = "没有可用的动作"
return {
"action_result": {
"action_type": action,
"action_data": action_data,
"reasoning": reasoning
},
"action_result": {"action_type": action, "action_data": action_data, "reasoning": reasoning},
"current_mind": current_mind,
"observed_messages": observed_messages
"observed_messages": observed_messages,
}
# --- 构建提示词 (调用修改后的 PromptBuilder 方法) ---
@@ -271,7 +267,6 @@ class ActionPlanner:
else:
mind_info_block = "你刚参与聊天"
individuality = Individuality.get_instance()
personality_block = individuality.get_prompt(x_person=2, level=2)
action_options_block = ""