better 新增了分割器,表情惩罚系数的自定义

This commit is contained in:
SengokuCola
2025-03-25 22:27:38 +08:00
parent c220f4c79e
commit 51990391fd
5 changed files with 40 additions and 196 deletions

View File

@@ -57,6 +57,7 @@ class BotConfig:
response_willing_amplifier: float = 1.0 # 回复意愿放大系数
response_interested_rate_amplifier: float = 1.0 # 回复兴趣度放大系数
down_frequency_rate: float = 3 # 降低回复频率的群组回复意愿降低系数
emoji_response_penalty: float = 0.0 # 表情包回复惩罚
# response
MODEL_R1_PROBABILITY: float = 0.8 # R1模型概率
@@ -101,6 +102,11 @@ class BotConfig:
chinese_typo_min_freq = 7 # 最小字频阈值
chinese_typo_tone_error_rate = 0.2 # 声调错误概率
chinese_typo_word_replace_rate = 0.02 # 整词替换概率
#response_spliter
enable_response_spliter = True # 是否启用回复分割器
response_max_length = 100 # 回复允许的最大长度
response_max_sentence_num = 3 # 回复允许的最大句子数
# remote
remote_enable: bool = True # 是否启用远程控制
@@ -242,7 +248,8 @@ class BotConfig:
config.response_willing_amplifier = willing_config.get("response_willing_amplifier", config.response_willing_amplifier)
config.response_interested_rate_amplifier = willing_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
config.down_frequency_rate = willing_config.get("down_frequency_rate", config.down_frequency_rate)
config.emoji_response_penalty = willing_config.get("emoji_response_penalty", config.emoji_response_penalty)
def model(parent: dict):
# 加载模型配置
model_config: dict = parent["model"]
@@ -378,6 +385,12 @@ class BotConfig:
config.chinese_typo_word_replace_rate = chinese_typo_config.get(
"word_replace_rate", config.chinese_typo_word_replace_rate
)
def response_spliter(parent: dict):
response_spliter_config = parent["response_spliter"]
config.enable_response_spliter = response_spliter_config.get("enable_response_spliter", config.enable_response_spliter)
config.response_max_length = response_spliter_config.get("response_max_length", config.response_max_length)
config.response_max_sentence_num = response_spliter_config.get("response_max_sentence_num", config.response_max_sentence_num)
def groups(parent: dict):
groups_config = parent["groups"]
@@ -409,6 +422,7 @@ class BotConfig:
"remote": {"func": remote, "support": ">=0.0.10", "necessary": False},
"keywords_reaction": {"func": keywords_reaction, "support": ">=0.0.2", "necessary": False},
"chinese_typo": {"func": chinese_typo, "support": ">=0.0.3", "necessary": False},
"response_spliter": {"func": response_spliter, "support": ">=0.0.11", "necessary": False},
"experimental": {"func": experimental, "support": ">=0.0.11", "necessary": False},
}

View File

@@ -244,21 +244,17 @@ def split_into_sentences_w_remove_punctuation(text: str) -> List[str]:
List[str]: 分割后的句子列表
"""
len_text = len(text)
if len_text < 5:
if len_text < 4:
if random.random() < 0.01:
return list(text) # 如果文本很短且触发随机条件,直接按字符分割
else:
return [text]
if len_text < 12:
split_strength = 0.3
split_strength = 0.2
elif len_text < 32:
split_strength = 0.7
split_strength = 0.6
else:
split_strength = 0.9
# 先移除换行符
# print(f"split_strength: {split_strength}")
# print(f"处理前的文本: {text}")
split_strength = 0.7
# 检查是否为西文字符段落
if not is_western_paragraph(text):
@@ -348,7 +344,7 @@ def random_remove_punctuation(text: str) -> str:
for i, char in enumerate(text):
if char == "。" and i == text_len - 1:  # 结尾的句号
if random.random() > 0.4: # 80%概率删除结尾句号
if random.random() > 0.1: # 90%概率删除结尾句号
continue
elif char == "，":
rand = random.random()
@@ -364,10 +360,12 @@ def random_remove_punctuation(text: str) -> str:
def process_llm_response(text: str) -> List[str]:
# processed_response = process_text_with_typos(content)
# 对西文字符段落的回复长度设置为汉字字符的两倍
if len(text) > 100 and not is_western_paragraph(text) :
max_length = global_config.response_max_length
max_sentence_num = global_config.response_max_sentence_num
if len(text) > max_length and not is_western_paragraph(text) :
logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复")
return ["懒得说"]
elif len(text) > 200 :
elif len(text) > max_length * 2 :
logger.warning(f"回复过长 ({len(text)} 字符),返回默认回复")
return ["懒得说"]
# 处理长消息
@@ -377,7 +375,10 @@ def process_llm_response(text: str) -> List[str]:
tone_error_rate=global_config.chinese_typo_tone_error_rate,
word_replace_rate=global_config.chinese_typo_word_replace_rate,
)
split_sentences = split_into_sentences_w_remove_punctuation(text)
if global_config.enable_response_spliter:
split_sentences = split_into_sentences_w_remove_punctuation(text)
else:
split_sentences = [text]
sentences = []
for sentence in split_sentences:
if global_config.chinese_typo_enable:
@@ -389,14 +390,14 @@ def process_llm_response(text: str) -> List[str]:
sentences.append(sentence)
# 检查分割后的消息数量是否超过配置的最大句子数
if len(sentences) > 3:
if len(sentences) > max_sentence_num:
logger.warning(f"分割后消息数量过多 ({len(sentences)} 条),返回默认回复")
return [f"{global_config.BOT_NICKNAME}不知道哦"]
return sentences
def calculate_typing_time(input_string: str, chinese_time: float = 0.4, english_time: float = 0.2) -> float:
def calculate_typing_time(input_string: str, chinese_time: float = 0.2, english_time: float = 0.1) -> float:
"""
计算输入字符串所需的时间,中文和英文字符有不同的输入时间
input_string (str): 输入的字符串