better: organize config

SengokuCola
2025-04-30 17:18:14 +08:00
parent 3ed5835937
commit b8736e4299
24 changed files with 484 additions and 462 deletions

View File

@@ -99,15 +99,20 @@ class ChatBot:
        template_group_name = None

        async def preprocess():
+           logger.trace("Starting message preprocessing...")
            # If this is a private chat
            if groupinfo is None:
+               logger.trace("Private chat message detected")
                # Check whether private chat mode is enabled in the config
                if global_config.enable_friend_chat:
+                   logger.trace("Private chat mode is enabled")
                    # Check whether to enter the PFC flow
                    if global_config.enable_pfc_chatting:
+                       logger.trace("Entering the PFC private chat pipeline")
                        userinfo = message.message_info.user_info
                        messageinfo = message.message_info
                        # Create the chat stream
+                       logger.trace(f"Creating/fetching chat stream for {userinfo.user_id}")
                        chat = await chat_manager.get_or_create_stream(
                            platform=messageinfo.platform,
                            user_info=userinfo,
@@ -118,9 +123,11 @@ class ChatBot:
                        await self._create_pfc_chat(message)
                    # Keep PFC traffic out of the normal heartflow message-processing logic
                    else:
+                       logger.trace("Entering normal heartflow private chat processing")
                        await self.heartflow_processor.process_message(message_data)
            # Group chats go through the heartflow message-processing logic by default
            else:
+               logger.trace(f"Group chat message detected, group ID: {groupinfo.group_id}")
                await self.heartflow_processor.process_message(message_data)

        if template_group_name:
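For orientation, the branching these new trace calls instrument boils down to a small routing decision. A hypothetical distillation (function name and return values invented here, not part of the commit):

def route_message(groupinfo, cfg):
    """Pick a processing pipeline for an incoming message (sketch only)."""
    if groupinfo is None:  # private chat
        if not cfg.enable_friend_chat:
            return "ignore"  # assumed: private chat disabled means the message is dropped
        return "pfc" if cfg.enable_pfc_chatting else "heartflow"
    return "heartflow"  # group chats always take the heartflow pipeline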

View File

@@ -732,6 +732,9 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
            return f"{int(diff / 86400)} days ago:\n"
        else:
            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":\n"
+   elif mode == "lite":
+       # Return only the time of day (HH:MM:SS), meow~
+       return time.strftime("%H:%M:%S", time.localtime(timestamp))

    return None
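A quick usage sketch of the new "lite" branch (illustrative values; assumes the function above is in scope):

ts = 1714467494.0  # arbitrary example timestamp
translate_timestamp_to_human_readable(ts, mode="lite")    # -> "17:18:14" (local HH:MM:SS)
translate_timestamp_to_human_readable(ts, mode="normal")  # -> a relative form such as "3 days ago:\n"

Note the asymmetry: the "lite" output carries no ":\n" suffix, so callers that concatenate it into prompts must add their own separator.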

View File

@@ -30,10 +30,12 @@ from src.plugins.moods.moods import MoodManager
from src.individuality.individuality import Individuality
INITIAL_DURATION = 60.0
WAITING_TIME_THRESHOLD = 300  # Threshold for waiting for new messages, in seconds
+EMOJI_SEND_PRO = 0.3  # Set a probability: e.g. only ~30% of emoji requests actually get sent
+CONSECUTIVE_NO_REPLY_THRESHOLD = 3  # Threshold for consecutive non-replies
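With these defaults, the count-and-time trigger used later in this file has a concrete value; a quick worked check (constants as defined above):

# Both conditions must hold before on_consecutive_no_reply_callback() fires:
#   count: _lian_xu_bu_hui_fu_ci_shu >= CONSECUTIVE_NO_REPLY_THRESHOLD  (3 misses)
#   time:  cumulative wait >= 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD
time_threshold = 0.66 * 300 * 3  # = 594.0 seconds of accumulated waiting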
logger = get_logger("interest") # Logger Name Changed
@@ -179,8 +181,6 @@ class HeartFChatting:
    Its lifecycle is now controlled by the FOCUSED state of its associated SubHeartflow.
    """

-   CONSECUTIVE_NO_REPLY_THRESHOLD = 3  # Threshold for consecutive non-replies

    def __init__(
        self,
        chat_id: str,
@@ -644,14 +644,14 @@ class HeartFChatting:
            self._lian_xu_bu_hui_fu_ci_shu += 1
            self._lian_xu_deng_dai_shi_jian += dang_qian_deng_dai  # Accumulate the waiting time
            logger.debug(
-               f"{self.log_prefix} Consecutive no-reply count increased: {self._lian_xu_bu_hui_fu_ci_shu}/{self.CONSECUTIVE_NO_REPLY_THRESHOLD}, "
+               f"{self.log_prefix} Consecutive no-reply count increased: {self._lian_xu_bu_hui_fu_ci_shu}/{CONSECUTIVE_NO_REPLY_THRESHOLD}, "
                f"this wait: {dang_qian_deng_dai:.2f}s, cumulative wait: {self._lian_xu_deng_dai_shi_jian:.2f}"
            )

            # Check whether both the count and time thresholds are reached
-           time_threshold = 0.66 * WAITING_TIME_THRESHOLD * self.CONSECUTIVE_NO_REPLY_THRESHOLD
+           time_threshold = 0.66 * WAITING_TIME_THRESHOLD * CONSECUTIVE_NO_REPLY_THRESHOLD
            if (
-               self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD
+               self._lian_xu_bu_hui_fu_ci_shu >= CONSECUTIVE_NO_REPLY_THRESHOLD
                and self._lian_xu_deng_dai_shi_jian >= time_threshold
            ):
                logger.info(
@@ -661,7 +661,7 @@ class HeartFChatting:
                )
                # Invoke the callback. Note: the counter and timer are not reset here; we rely on the callback successfully changing state to implicitly reset the context.
                await self.on_consecutive_no_reply_callback()
-           elif self._lian_xu_bu_hui_fu_ci_shu >= self.CONSECUTIVE_NO_REPLY_THRESHOLD:
+           elif self._lian_xu_bu_hui_fu_ci_shu >= CONSECUTIVE_NO_REPLY_THRESHOLD:
                # Only the count threshold is reached; the time threshold is not
                logger.debug(
                    f"{self.log_prefix} Consecutive no-reply count reached the threshold ({self._lian_xu_bu_hui_fu_ci_shu} times) "
@@ -979,6 +979,20 @@ class HeartFChatting:
f"{self.log_prefix}[Planner] 恢复了原始动作集, 当前可用: {list(self.action_manager.get_available_actions().keys())}"
)
# --- 结束:确保动作恢复 ---
# --- 新增:概率性忽略文本回复附带的表情(正确的位置)---
if action == "text_reply" and emoji_query:
logger.debug(f"{self.log_prefix}[Planner] 大模型想让麦麦发文字时带表情: '{emoji_query}'")
# 掷骰子看看要不要听它的
if random.random() > EMOJI_SEND_PRO:
logger.info(f"{self.log_prefix}[Planner] 但是麦麦这次不想加表情 ({1-EMOJI_SEND_PRO:.0%}),忽略表情 '{emoji_query}'")
emoji_query = "" # 把表情请求清空,就不发了
else:
logger.info(f"{self.log_prefix}[Planner] 好吧,加上表情 '{emoji_query}'")
# --- 结束:概率性忽略 ---
# --- 结束 LLM 决策 --- #
return {
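Stripped of logging, the gate above reduces to a tiny pure function. A standalone sketch (function name hypothetical, not part of the commit) that makes the 30/70 split easy to unit-test:

import random

EMOJI_SEND_PRO = 0.3  # probability of actually sending the emoji

def gate_emoji(emoji_query: str, roll=random.random) -> str:
    """Keep the requested emoji ~30% of the time; otherwise drop it."""
    if emoji_query and roll() > EMOJI_SEND_PRO:
        return ""  # skipped (~70% of the time)
    return emoji_query

# Deterministic checks by injecting the roll:
assert gate_emoji("happy", roll=lambda: 0.9) == ""       # above 0.3 -> dropped
assert gate_emoji("happy", roll=lambda: 0.1) == "happy"  # at or below 0.3 -> kept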

View File

@@ -174,7 +174,7 @@ class PromptBuilder:
        message_list_before_now = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_stream.stream_id,
            timestamp=time.time(),
-           limit=global_config.MAX_CONTEXT_SIZE,
+           limit=global_config.observation_context_size,
        )

        chat_talking_prompt = await build_readable_messages(
@@ -241,6 +241,8 @@ class PromptBuilder:
            prompt_ger=prompt_ger,
            moderation_prompt=await global_prompt_manager.get_prompt_async("moderation_prompt"),
        )
+       logger.debug(f"focus_chat_prompt: \n{prompt}")

        return prompt
@@ -255,7 +257,7 @@ class PromptBuilder:
        who_chat_in_group += get_recent_group_speaker(
            chat_stream.stream_id,
            (chat_stream.user_info.platform, chat_stream.user_info.user_id),
-           limit=global_config.MAX_CONTEXT_SIZE,
+           limit=global_config.observation_context_size,
        )

        relation_prompt = ""
@@ -314,7 +316,7 @@ class PromptBuilder:
        message_list_before_now = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_stream.stream_id,
            timestamp=time.time(),
-           limit=global_config.MAX_CONTEXT_SIZE,
+           limit=global_config.observation_context_size,
        )

        chat_talking_prompt = await build_readable_messages(
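Because the commit renames global_config.MAX_CONTEXT_SIZE to observation_context_size at several call sites, a temporary compatibility alias (hypothetical, not part of this commit) would protect any call site the sweep missed:

import warnings

class BotConfig:
    # Sketch of the config object; field name taken from the diff, default value assumed.
    def __init__(self, observation_context_size: int = 15):
        self.observation_context_size = observation_context_size

    @property
    def MAX_CONTEXT_SIZE(self) -> int:
        # Deprecated alias for the old attribute name.
        warnings.warn("MAX_CONTEXT_SIZE is deprecated; use observation_context_size", DeprecationWarning)
        return self.observation_context_size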

View File

@@ -43,6 +43,8 @@ class NormalChat:
        self.mood_manager = MoodManager.get_instance()  # MoodManager remains a singleton
        # Stores this instance's interest-monitoring task
+       self.start_time = time.time()
+       self.last_speak_time = 0
        self._chat_task: Optional[asyncio.Task] = None
        logger.info(f"[{self.stream_name}] NormalChat instance initialized.")
@@ -119,6 +121,8 @@ class NormalChat:
        await message_manager.add_message(message_set)

+       self.last_speak_time = time.time()

        return first_bot_msg

    # Changed to an instance method
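The new last_speak_time field enables simple idle/throttle checks; a minimal standalone sketch of the pattern (class name hypothetical):

import time

class SpeakClock:
    def __init__(self):
        self.start_time = time.time()   # mirrors the fields added to NormalChat.__init__
        self.last_speak_time = 0.0

    def mark_spoke(self):
        self.last_speak_time = time.time()

    def idle_seconds(self) -> float:
        # Fall back to uptime if the bot has never spoken yet.
        anchor = self.last_speak_time or self.start_time
        return time.time() - anchor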

View File

@@ -632,7 +632,7 @@ class LLMRequest:
            **params_copy,
        }

        if "max_tokens" not in payload and "max_completion_tokens" not in payload:
-           payload["max_tokens"] = global_config.max_response_length
+           payload["max_tokens"] = global_config.model_max_output_length

        # If max_tokens is still present in the payload and needs converting, check once more here
        if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
            payload["max_completion_tokens"] = payload.pop("max_tokens")

View File

@@ -282,10 +282,10 @@ class RelationshipManager:
        if is_id:
            person_id = person
        else:
-           print(f"person: {person}")
+           # print(f"person: {person}")
            person_id = person_info_manager.get_person_id(person[0], person[1])

        person_name = await person_info_manager.get_value(person_id, "person_name")
-       print(f"person_name: {person_name}")
+       # print(f"person_name: {person_name}")

        relationship_value = await person_info_manager.get_value(person_id, "relationship_value")
        level_num = self.calculate_level_num(relationship_value)
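Commenting the prints out silences them but leaves dead code behind; assuming this module can use the same get_logger helper as the files above, routing them through a logger would keep the output available at debug level:

logger = get_logger("relationship")  # assumed logger name

logger.debug(f"person: {person}")
logger.debug(f"person_name: {person_name}")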

View File

@@ -8,13 +8,12 @@ from typing import List
class InfoCatcher:
    def __init__(self):
-       self.chat_history = []  # Chat history, three times the context length in use
-       self.context_length = global_config.MAX_CONTEXT_SIZE
-       self.chat_history_in_thinking = []  # Chat during the thinking phase
-       self.chat_history_after_response = []  # Chat after the reply, one context length long
+       self.chat_history = []  # Chat history, three times the context length in use, meow~
+       self.context_length = global_config.observation_context_size
+       self.chat_history_in_thinking = []  # Chat during the thinking phase, meow~
+       self.chat_history_after_response = []  # Chat after the reply, one context length long, meow~
        self.chat_id = ""
-       self.response_mode = global_config.response_mode
        self.trigger_response_text = ""
        self.response_text = ""
@@ -36,10 +35,10 @@ class InfoCatcher:
"model": "",
}
# 使用字典来存储 reasoning 模式的数据
# 使用字典来存储 reasoning 模式的数据喵~
self.reasoning_data = {"thinking_log": "", "prompt": "", "response": "", "model": ""}
# 耗时
# 耗时喵~
self.timing_results = {
"interested_rate_time": 0,
"sub_heartflow_observe_time": 0,
@@ -73,15 +72,25 @@ class InfoCatcher:
self.heartflow_data["sub_heartflow_now"] = current_mind
def catch_after_llm_generated(self, prompt: str, response: str, reasoning_content: str = "", model_name: str = ""):
if self.response_mode == "heart_flow":
self.heartflow_data["prompt"] = prompt
self.heartflow_data["response"] = response
self.heartflow_data["model"] = model_name
elif self.response_mode == "reasoning":
self.reasoning_data["thinking_log"] = reasoning_content
self.reasoning_data["prompt"] = prompt
self.reasoning_data["response"] = response
self.reasoning_data["model"] = model_name
# if self.response_mode == "heart_flow": # 条件判断不需要了喵~
# self.heartflow_data["prompt"] = prompt
# self.heartflow_data["response"] = response
# self.heartflow_data["model"] = model_name
# elif self.response_mode == "reasoning": # 条件判断不需要了喵~
# self.reasoning_data["thinking_log"] = reasoning_content
# self.reasoning_data["prompt"] = prompt
# self.reasoning_data["response"] = response
# self.reasoning_data["model"] = model_name
# 直接记录信息喵~
self.reasoning_data["thinking_log"] = reasoning_content
self.reasoning_data["prompt"] = prompt
self.reasoning_data["response"] = response
self.reasoning_data["model"] = model_name
# 如果 heartflow 数据也需要通用字段,可以取消下面的注释喵~
# self.heartflow_data["prompt"] = prompt
# self.heartflow_data["response"] = response
# self.heartflow_data["model"] = model_name
self.response_text = response
@@ -172,13 +181,13 @@ class InfoCatcher:
        }

    def done_catch(self):
-       """Store the collected info into the database's thinking_log collection"""
+       """Store the collected info into the database's thinking_log collection, meow~"""
        try:
-           # Convert the message objects into serializable dicts
+           # Convert the message objects into serializable dicts, meow~
            thinking_log_data = {
                "chat_id": self.chat_id,
-               "response_mode": self.response_mode,
+               # "response_mode": self.response_mode,  # delete this one too, meow~
                "trigger_text": self.trigger_response_text,
                "response_text": self.response_text,
                "trigger_info": {
@@ -195,18 +204,20 @@ class InfoCatcher:
"chat_history_after_response": self.message_list_to_dict(self.chat_history_after_response),
}
# 根据不同的响应模式添加相应的数据
if self.response_mode == "heart_flow":
thinking_log_data["mode_specific_data"] = self.heartflow_data
elif self.response_mode == "reasoning":
thinking_log_data["mode_specific_data"] = self.reasoning_data
# 根据不同的响应模式添加相应的数据喵~ # 现在直接都加上去好了喵~
# if self.response_mode == "heart_flow":
# thinking_log_data["mode_specific_data"] = self.heartflow_data
# elif self.response_mode == "reasoning":
# thinking_log_data["mode_specific_data"] = self.reasoning_data
thinking_log_data["heartflow_data"] = self.heartflow_data
thinking_log_data["reasoning_data"] = self.reasoning_data
# 将数据插入到 thinking_log 集合中
# 将数据插入到 thinking_log 集合中喵~
db.thinking_log.insert_one(thinking_log_data)
return True
except Exception as e:
print(f"存储思考日志时出错: {str(e)}")
print(f"存储思考日志时出错: {str(e)} 喵~")
print(traceback.format_exc())
return False
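With the mode branch gone, every stored document now carries both payloads side by side, roughly this shape (field values illustrative):

thinking_log_doc = {
    "chat_id": "...",
    "trigger_text": "...",
    "response_text": "...",
    "trigger_info": {},  # nested structure omitted
    "heartflow_data": {"prompt": "...", "response": "...", "model": "..."},
    "reasoning_data": {"thinking_log": "...", "prompt": "...", "response": "...", "model": "..."},
}

Any consumer that previously read mode_specific_data would presumably need updating to look at the two new keys.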