🤖 Auto-format code [skip ci]
@@ -138,9 +138,11 @@ class MessageBuffer:
             if msg.message.message_segment.type != "seglist":
                 type = msg.message.message_segment.type
             else:
-                if (isinstance(msg.message.message_segment.data, list)
-                        and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
-                        and len(msg.message.message_segment.data) == 1):
+                if (
+                    isinstance(msg.message.message_segment.data, list)
+                    and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
+                    and len(msg.message.message_segment.data) == 1
+                ):
                     type = msg.message.message_segment.data[0].type
             combined_text.append(msg.message.processed_plain_text)
             continue
@@ -152,9 +154,11 @@ class MessageBuffer:
             if msg.message.message_segment.type != "seglist":
                 F_type = msg.message.message_segment.type
             else:
-                if (isinstance(msg.message.message_segment.data, list)
-                        and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
-                        and len(msg.message.message_segment.data) == 1):
+                if (
+                    isinstance(msg.message.message_segment.data, list)
+                    and all(isinstance(x, Seg) for x in msg.message.message_segment.data)
+                    and len(msg.message.message_segment.data) == 1
+                ):
                     F_type = msg.message.message_segment.data[0].type
             if hasattr(msg.message, "processed_plain_text") and msg.message.processed_plain_text:
                 if F_type == "text":
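Note: both hunks above only reflow the same guard, which treats a "seglist" segment that wraps exactly one Seg as having that inner segment's type. A minimal standalone sketch of the unwrapping idea, with a hypothetical Seg dataclass standing in for the project's real class (the helper name is an assumption, not code from this repo):

from dataclasses import dataclass
from typing import Any


@dataclass
class Seg:  # hypothetical stand-in for the project's Seg type
    type: str
    data: Any


def effective_type(segment: Seg) -> str:
    # Non-seglist segments keep their own type.
    if segment.type != "seglist":
        return segment.type
    # A seglist wrapping exactly one Seg is treated as that inner segment's type.
    if (
        isinstance(segment.data, list)
        and all(isinstance(x, Seg) for x in segment.data)
        and len(segment.data) == 1
    ):
        return segment.data[0].type
    return "seglist"


print(effective_type(Seg("text", "hi")))                       # text
print(effective_type(Seg("seglist", [Seg("image", b"...")])))  # image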
@@ -344,13 +344,13 @@ def process_llm_response(text: str) -> List[str]:
     # Extract content wrapped in () or []
     pattern = re.compile(r"[\(\[\(].*?[\)\]\)]")
     # _extracted_contents = pattern.findall(text)
-    _extracted_contents = pattern.findall(protected_text) # search in the protected text
+    _extracted_contents = pattern.findall(protected_text)  # search in the protected text

     # Remove () and [] together with the content they wrap
     # cleaned_text = pattern.sub("", text)
     cleaned_text = pattern.sub("", protected_text)

-    if cleaned_text == '':
+    if cleaned_text == "":
         return ["呃呃"]

     logger.debug(f"{text}去除括号处理后的文本: {cleaned_text}")
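For reference, the pattern touched above strips parenthesized or bracketed asides from an LLM reply before it is split into messages; the repeated `\(` / `\)` entries in the character classes look like full-width brackets that this view has normalized. A small sketch of the behavior, assuming only the pattern shown in the hunk:

import re

# Same pattern as in the hunk: lazily match anything wrapped in () or [].
pattern = re.compile(r"[\(\[\(].*?[\)\]\)]")

protected_text = "好的(小声)我知道了[叹气]再见"
extracted = pattern.findall(protected_text)  # ['(小声)', '[叹气]']
cleaned = pattern.sub("", protected_text)    # '好的我知道了再见'
print(extracted, cleaned)

# An empty result falls back to a placeholder, mirroring the `return ["呃呃"]` branch.
fallback = cleaned if cleaned != "" else "呃呃"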
@@ -717,19 +717,19 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
     # normal mode: convert every timestamp directly
     if mode == "normal":
         result_text = text
-
+
         # Convert each timestamp to a readable form and track identical readable strings
         timestamp_readable_map = {}
         readable_time_used = set()
-
+
         for match in matches:
             timestamp = float(match.group(1))
             readable_time = translate_timestamp_to_human_readable(timestamp, "normal")
             timestamp_readable_map[match.group(0)] = (timestamp, readable_time)
-
+
         # Sort by timestamp
         sorted_timestamps = sorted(timestamp_readable_map.items(), key=lambda x: x[1][0])
-
+
         # Perform replacements; identical readable forms keep only the earliest timestamp
         for ts_str, (_, readable) in sorted_timestamps:
             pattern_instance = re.escape(ts_str)
@@ -740,7 +740,7 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:
             # Otherwise replace it with the readable time and record it
             result_text = re.sub(pattern_instance, readable, result_text, count=1)
             readable_time_used.add(readable)
-
+
         return result_text
     else:
         # lite mode: split into 5-second buckets and convert selectively
@@ -801,18 +801,18 @@ def parse_text_timestamps(text: str, mode: str = "normal") -> str:

         # Sort by timestamp in ascending order
         to_convert.sort(key=lambda x: x[0])
-
+
         # Convert the timestamps to readable times and track which readable times are already used
         converted_timestamps = []
         readable_time_used = set()
-
+
         for ts, match in to_convert:
             readable_time = translate_timestamp_to_human_readable(ts, "relative")
             converted_timestamps.append((ts, match, readable_time))
-
+
         # Re-sort by the original match positions to avoid replacing at the wrong offsets
         converted_timestamps.sort(key=lambda x: x[1].start())
-
+
         # Replace from back to front so earlier positions stay unchanged
         converted_timestamps.reverse()
         for match, readable_time in converted_timestamps:
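The lite branch above records each regex match, then reverses the list and rewrites from the back so earlier match offsets stay valid. A self-contained sketch of that replace-from-the-end idea; the timestamp format and the humanize helper here are assumptions for illustration, not the project's own functions:

import re
import time


def humanize(ts: float) -> str:  # hypothetical stand-in for translate_timestamp_to_human_readable
    return time.strftime("%H:%M", time.localtime(ts))


text = "msg at 1700000000.0 and again at 1700000300.0"
matches = list(re.finditer(r"(\d{10}\.\d+)", text))

# Replace from the last match to the first so earlier .start()/.end() offsets stay correct.
for m in reversed(matches):
    text = text[: m.start()] + humanize(float(m.group(1))) + text[m.end() :]

print(text)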
@@ -194,11 +194,13 @@ class ReasoningChat:
             willing_manager.delete(message.message_info.message_id)
             F_type = "seglist"
             if message.message_segment.type != "seglist":
-                F_type =message.message_segment.type
+                F_type = message.message_segment.type
             else:
-                if (isinstance(message.message_segment.data, list)
-                        and all(isinstance(x, Seg) for x in message.message_segment.data)
-                        and len(message.message_segment.data) == 1):
+                if (
+                    isinstance(message.message_segment.data, list)
+                    and all(isinstance(x, Seg) for x in message.message_segment.data)
+                    and len(message.message_segment.data) == 1
+                ):
                     F_type = message.message_segment.data[0].type
             if F_type == "text":
                 logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
@@ -206,11 +206,13 @@ class ThinkFlowChat:
             willing_manager.delete(message.message_info.message_id)
             F_type = "seglist"
             if message.message_segment.type != "seglist":
-                F_type =message.message_segment.type
+                F_type = message.message_segment.type
             else:
-                if (isinstance(message.message_segment.data, list)
-                        and all(isinstance(x, Seg) for x in message.message_segment.data)
-                        and len(message.message_segment.data) == 1):
+                if (
+                    isinstance(message.message_segment.data, list)
+                    and all(isinstance(x, Seg) for x in message.message_segment.data)
+                    and len(message.message_segment.data) == 1
+                ):
                     F_type = message.message_segment.data[0].type
             if F_type == "text":
                 logger.info(f"触发缓冲,已炸飞消息:{message.processed_plain_text}")
@@ -74,4 +74,3 @@ class ClassicalWillingManager(BaseWillingManager):

     async def not_reply_handle(self, message_id):
         return await super().not_reply_handle(message_id)
-
@@ -234,4 +234,3 @@ class DynamicWillingManager(BaseWillingManager):

     async def after_generate_reply_handle(self, message_id):
         return await super().after_generate_reply_handle(message_id)
-
@@ -10,12 +10,15 @@ llmcheck 模式:
 The current usage is to extend this to other willing-management modes

 """

 import time
 from loguru import logger
 from ..models.utils_model import LLM_request
 from ...config.config import global_config

 # from ..chat.chat_stream import ChatStream
 from ..chat.utils import get_recent_group_detailed_plain_text

 # from .willing_manager import BaseWillingManager
 from .mode_mxp import MxpWillingManager
 import re
@@ -28,11 +31,9 @@ def is_continuous_chat(self, message_id: str):
     chat_id = willing_info.chat_id
     group_info = willing_info.group_info
     config = self.global_config
-    length = 5
+    length = 5
     if chat_id:
-        chat_talking_text = get_recent_group_detailed_plain_text(
-            chat_id, limit=length, combine=True
-        )
+        chat_talking_text = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True)
         if group_info:
             if str(config.BOT_QQ) in chat_talking_text:
                 return True
@@ -40,6 +41,7 @@ def is_continuous_chat(self, message_id: str):
             return False
     return False

+
 def llmcheck_decorator(trigger_condition_func):
     def decorator(func):
         @wraps(func)
@@ -50,18 +52,17 @@ def llmcheck_decorator(trigger_condition_func):
             else:
                 # Condition not met, fall back to the default flow
                 return func(self, message_id)

         return wrapper

     return decorator


 class LlmcheckWillingManager(MxpWillingManager):
     def __init__(self):
         super().__init__()
         self.model_v3 = LLM_request(model=global_config.llm_normal, temperature=0.3)

     async def get_llmreply_probability(self, message_id: str):
         message_info = self.ongoing_messages[message_id]
         chat_id = message_info.chat_id
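The hunk above only reflows the tail of `llmcheck_decorator`; the branch that actually consults the LLM sits outside this diff. The sketch below therefore only illustrates the gating pattern it implies: run the decorated probability method unless a trigger condition fires, in which case divert to an alternative path. The `condition_decorator` name and the `llm_path` callable are assumptions for illustration, not this repo's API.

from functools import wraps


def condition_decorator(trigger_condition_func, llm_path):
    """Route a method through llm_path when the trigger condition holds."""

    def decorator(func):
        @wraps(func)
        def wrapper(self, message_id):
            if trigger_condition_func(self, message_id):
                # Condition met: use the alternative (e.g. LLM-checked) path.
                return llm_path(self, message_id)
            else:
                # Condition not met, fall back to the default flow.
                return func(self, message_id)

        return wrapper

    return decorator


class Demo:
    @condition_decorator(lambda self, mid: mid.startswith("hot:"), lambda self, mid: 0.9)
    def get_reply_probability(self, message_id):
        return 0.1


print(Demo().get_reply_probability("hot:42"), Demo().get_reply_probability("cold:7"))  # 0.9 0.1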
@@ -77,9 +78,7 @@ class LlmcheckWillingManager(MxpWillingManager):
         current_time = time.strftime("%H:%M:%S", time.localtime())
         chat_talking_prompt = ""
         if chat_id:
-            chat_talking_prompt = get_recent_group_detailed_plain_text(
-                chat_id, limit=length, combine=True
-            )
+            chat_talking_prompt = get_recent_group_detailed_plain_text(chat_id, limit=length, combine=True)
         else:
             return 0

@@ -100,7 +99,7 @@ class LlmcheckWillingManager(MxpWillingManager):
         logger.info(f"{content_check} {reasoning_check}")
         probability = self.extract_marked_probability(content_check)
         # Interest-coefficient correction; unrelated activation fires too often, disabled for now, to be tuned after the new memory system lands
-        probability += (message_info.interested_rate * 0.25)
+        probability += message_info.interested_rate * 0.25
         probability = min(1.0, probability)
         if probability <= 0.1:
             probability = min(0.03, probability)
@@ -117,24 +116,24 @@ class LlmcheckWillingManager(MxpWillingManager):
     def extract_marked_probability(text):
         """Extract a probability value wrapped in markers; mainly used while testing and tuning the prompt"""
         text = text.strip()
-        pattern = r'##PROBABILITY_START##(.*?)##PROBABILITY_END##'
+        pattern = r"##PROBABILITY_START##(.*?)##PROBABILITY_END##"
         match = re.search(pattern, text, re.DOTALL)
         if match:
             prob_str = match.group(1).strip()
             # Handle percentages (65% → 0.65)
-            if '%' in prob_str:
-                return float(prob_str.replace('%', '')) / 100
+            if "%" in prob_str:
+                return float(prob_str.replace("%", "")) / 100
             # Handle fractions (2/3 → 0.666...)
-            elif '/' in prob_str:
-                numerator, denominator = map(float, prob_str.split('/'))
+            elif "/" in prob_str:
+                numerator, denominator = map(float, prob_str.split("/"))
                 return numerator / denominator
             # Handle plain decimals directly
             else:
                 return float(prob_str)

-        percent_match = re.search(r'(\d{1,3})%', text) # 65%
-        decimal_match = re.search(r'(0\.\d+|1\.0+)', text) # 0.65
-        fraction_match = re.search(r'(\d+)/(\d+)', text) # 2/3
+        percent_match = re.search(r"(\d{1,3})%", text)  # 65%
+        decimal_match = re.search(r"(0\.\d+|1\.0+)", text)  # 0.65
+        fraction_match = re.search(r"(\d+)/(\d+)", text)  # 2/3
         try:
             if percent_match:
                 prob = float(percent_match.group(1)) / 100
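For reference, the marker-based extraction reformatted above accepts a percentage, a fraction, or a plain decimal between the `##PROBABILITY_START##` / `##PROBABILITY_END##` markers. A hedged sketch of the same parsing rules as a standalone helper (not the class method itself, and without its regex fallbacks):

import re


def parse_marked_probability(text: str) -> float:
    """Parse '##PROBABILITY_START## ... ##PROBABILITY_END##' into a float."""
    match = re.search(r"##PROBABILITY_START##(.*?)##PROBABILITY_END##", text, re.DOTALL)
    if not match:
        raise ValueError("no probability markers found")
    prob_str = match.group(1).strip()
    if "%" in prob_str:  # 65% -> 0.65
        return float(prob_str.replace("%", "")) / 100
    if "/" in prob_str:  # 2/3 -> 0.666...
        numerator, denominator = map(float, prob_str.split("/"))
        return numerator / denominator
    return float(prob_str)  # 0.65 -> 0.65


assert parse_marked_probability("ok ##PROBABILITY_START##65%##PROBABILITY_END##") == 0.65
assert abs(parse_marked_probability("##PROBABILITY_START##2/3##PROBABILITY_END##") - 2 / 3) < 1e-9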
@@ -155,6 +154,4 @@ class LlmcheckWillingManager(MxpWillingManager):

     @llmcheck_decorator(is_continuous_chat)
     def get_reply_probability(self, message_id):
-        return super().get_reply_probability(
-            message_id
-        )
+        return super().get_reply_probability(message_id)
@@ -36,7 +36,9 @@ class MxpWillingManager(BaseWillingManager):
         self.last_response_person: Dict[str, tuple[str, int]] = {}  # info about the user last replied to
         self.temporary_willing: float = 0  # temporary willing value
         self.chat_bot_message_time: Dict[str, list[float]] = {}  # chat stream ID -> times of the bot's recent replies
-        self.chat_fatigue_punishment_list: Dict[str, list[tuple[float, float]]] = {}  # fatigue punishment list per chat stream: chat stream ID -> (start time, duration) entries
+        self.chat_fatigue_punishment_list: Dict[
+            str, list[tuple[float, float]]
+        ] = {}  # fatigue punishment list per chat stream: chat stream ID -> (start time, duration) entries
         self.chat_fatigue_willing_attenuation: Dict[str, float] = {}  # fatigue-based willing attenuation per chat stream

         # Tunable parameters
@@ -70,8 +72,9 @@ class MxpWillingManager(BaseWillingManager):
         w_info = self.ongoing_messages[message_id]
         if w_info.chat_id not in self.chat_bot_message_time:
             self.chat_bot_message_time[w_info.chat_id] = []
-        self.chat_bot_message_time[w_info.chat_id] = \
-            [t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60]
+        self.chat_bot_message_time[w_info.chat_id] = [
+            t for t in self.chat_bot_message_time[w_info.chat_id] if current_time - t < 60
+        ]
         self.chat_bot_message_time[w_info.chat_id].append(current_time)
         if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
             time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
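The hunk above is only a reflow of the list comprehension, but it shows the mechanism: keep the timestamps of the bot's replies from the last 60 seconds and trigger fatigue once the count reaches `fatigue_messages_triggered_num`. A small sketch of that sliding-window check (the trigger value and function name are assumptions; the real one is a config field on the manager):

import time

FATIGUE_MESSAGES_TRIGGERED_NUM = 5  # assumed value for illustration

bot_reply_times = []


def register_bot_reply(now=None):
    """Record a bot reply; return True when the 60-second window reaches the trigger count."""
    global bot_reply_times
    now = time.time() if now is None else now
    # Keep only replies from the last 60 seconds, then add the new one.
    bot_reply_times = [t for t in bot_reply_times if now - t < 60]
    bot_reply_times.append(now)
    return len(bot_reply_times) >= FATIGUE_MESSAGES_TRIGGERED_NUM


for i in range(6):
    print(register_bot_reply(1000.0 + i))  # becomes True on the fifth reply inside the window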
@@ -127,7 +130,9 @@ class MxpWillingManager(BaseWillingManager):
         if w_info.interested_rate > 0:
             current_willing += math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
             if self.is_debug:
-                self.logger.debug(f"兴趣增益:{math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain}")
+                self.logger.debug(
+                    f"兴趣增益:{math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain}"
+                )

         self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing

@@ -144,7 +149,9 @@ class MxpWillingManager(BaseWillingManager):
         ):
             current_willing += self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)
             if self.is_debug:
-                self.logger.debug(f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}")
+                self.logger.debug(
+                    f"单聊增益:{self.single_chat_gain * (2 * self.last_response_person[w_info.chat_id][1] + 1)}"
+                )

         current_willing += self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)
         if self.is_debug:
@@ -168,7 +175,6 @@ class MxpWillingManager(BaseWillingManager):
             current_willing = 0
             if self.is_debug:
                 self.logger.debug("进行中消息惩罚:归0")
-

         probability = self._willing_to_probability(current_willing)

@@ -225,18 +231,21 @@ class MxpWillingManager(BaseWillingManager):

         if chat.stream_id not in self.chat_fatigue_punishment_list:
             self.chat_fatigue_punishment_list[chat.stream_id] = [
-                (current_time, self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60)
+                (
+                    current_time,
+                    self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60,
+                )
             ]
-            self.chat_fatigue_willing_attenuation[chat.stream_id] = - 2 * self.basic_maximum_willing * self.fatigue_coefficient
-
-
+            self.chat_fatigue_willing_attenuation[chat.stream_id] = (
+                -2 * self.basic_maximum_willing * self.fatigue_coefficient
+            )

     def _willing_to_probability(self, willing: float) -> float:
         """Convert a willing value into a probability"""
         willing = max(0, willing)
         if willing < 2:
             probability = math.atan(willing * 2) / math.pi * 2
-        elif willing <2.5:
+        elif willing < 2.5:
             probability = math.atan(willing * 4) / math.pi * 2
         else:
             probability = 1
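The `_willing_to_probability` mapping reformatted above is piecewise: `atan(2w)·2/π` below a willing of 2, a steeper `atan(4w)·2/π` between 2 and 2.5, and a flat 1 beyond that. A quick sanity sketch of a few spot values under exactly that formula:

import math


def willing_to_probability(willing: float) -> float:
    willing = max(0, willing)
    if willing < 2:
        return math.atan(willing * 2) / math.pi * 2
    elif willing < 2.5:
        return math.atan(willing * 4) / math.pi * 2
    else:
        return 1.0


for w in (0.0, 0.5, 1.0, 2.0, 2.4, 3.0):
    print(w, round(willing_to_probability(w), 3))
# 0.0 -> 0.0, 0.5 -> 0.5, 1.0 -> ~0.705, 2.0 -> ~0.921, 2.4 -> ~0.934, 3.0 -> 1.0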
@@ -252,8 +261,13 @@ class MxpWillingManager(BaseWillingManager):
         # Clean up expired messages
         current_time = time.time()
         message_times = [
-            msg_time for msg_time in message_times if current_time - msg_time <
-            self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60
+            msg_time
+            for msg_time in message_times
+            if current_time - msg_time
+            < self.number_of_message_storage
+            * self.basic_maximum_willing
+            / self.expected_replies_per_min
+            * 60
         ]
         self.chat_new_message_time[chat_id] = message_times

@@ -271,7 +285,6 @@ class MxpWillingManager(BaseWillingManager):
         if self.is_debug:
             self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")
-

     def _get_relationship_level_num(self, relationship_value) -> int:
         """Relationship level calculation"""
         if -1000 <= relationship_value < -227:
@@ -292,9 +305,8 @@ class MxpWillingManager(BaseWillingManager):

     def _basic_willing_culculate(self, t: float) -> float:
         """Basic willing value calculation"""
-        return math.tan(t * self.expected_replies_per_min * math.pi
-                        / 120 / self.number_of_message_storage) / 2
+        return math.tan(t * self.expected_replies_per_min * math.pi / 120 / self.number_of_message_storage) / 2

     async def _fatigue_attenuation(self):
         """Fatigue attenuation"""
         while True:
@@ -305,10 +317,13 @@ class MxpWillingManager(BaseWillingManager):
                 fatigue_list = [z for z in fatigue_list if current_time - z[0] < z[1]]
                 self.chat_fatigue_willing_attenuation[chat_id] = 0
                 for start_time, duration in fatigue_list:
-                    self.chat_fatigue_willing_attenuation[chat_id] += \
-                        (self.chat_reply_willing[chat_id] * 2 / math.pi * math.asin(
-                            2 * (current_time - start_time) / duration - 1
-                        ) - self.chat_reply_willing[chat_id]) * self.fatigue_coefficient
+                    self.chat_fatigue_willing_attenuation[chat_id] += (
+                        self.chat_reply_willing[chat_id]
+                        * 2
+                        / math.pi
+                        * math.asin(2 * (current_time - start_time) / duration - 1)
+                        - self.chat_reply_willing[chat_id]
+                    ) * self.fatigue_coefficient

     async def get_willing(self, chat_id):
         return self.temporary_willing
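The reflowed expression above ramps the fatigue penalty back toward zero over each punishment window: as `current_time - start_time` sweeps from 0 to `duration`, `asin(2·x/duration − 1)` moves from −π/2 to +π/2, so the term climbs from roughly `−2·willing·coefficient` up to 0. A small numeric check under that reading (the willing value and coefficient below are placeholders, not config values from this repo):

import math

willing = 1.0              # placeholder for self.chat_reply_willing[chat_id]
fatigue_coefficient = 1.0  # placeholder

duration = 100.0
for elapsed in (0.0, 25.0, 50.0, 75.0, 100.0):
    attenuation = (
        willing * 2 / math.pi * math.asin(2 * elapsed / duration - 1) - willing
    ) * fatigue_coefficient
    print(elapsed, round(attenuation, 3))
# 0.0 -> -2.0, 25.0 -> ~-1.333, 50.0 -> -1.0, 75.0 -> ~-0.667, 100.0 -> 0.0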