Memory-related changes

This commit is contained in:
tcmofashi
2025-03-02 16:06:11 +08:00
parent 97834e7a93
commit c0cb28e10d
8 changed files with 23 additions and 18 deletions

View File

@@ -85,7 +85,7 @@ async def monitor_relationships():
async def build_memory_task():
"""每30秒执行一次记忆构建"""
print("\033[1;32m[记忆构建]\033[0m 开始构建记忆...")
- hippocampus.build_memory(chat_size=12)
+ await hippocampus.build_memory(chat_size=12)
print("\033[1;32m[记忆构建]\033[0m 记忆构建完成")

View File

@@ -37,7 +37,7 @@ class BotConfig:
talk_frequency_down_groups = set()
ban_user_id = set()
- build_memory_interval: int = 60 # 记忆构建间隔(秒)
+ build_memory_interval: int = 600 # 记忆构建间隔(秒)
EMOJI_CHECK_INTERVAL: int = 120 # 表情包检查间隔(分钟)
EMOJI_REGISTER_INTERVAL: int = 10 # 表情包注册间隔(分钟)
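For context, a minimal sketch of how these fields might sit in the config class, assuming BotConfig is a plain dataclass; only the field names and default values come from the diff, the layout is an assumption:

from dataclasses import dataclass, field

@dataclass
class BotConfig:
    talk_frequency_down_groups: set = field(default_factory=set)
    ban_user_id: set = field(default_factory=set)
    build_memory_interval: int = 600   # memory build interval in seconds, raised from 60
    EMOJI_CHECK_INTERVAL: int = 120    # emoji check interval (minutes)
    EMOJI_REGISTER_INTERVAL: int = 10  # emoji register interval (minutes)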

View File

@@ -110,7 +110,7 @@ class LLMResponseGenerator:
"model": model_name,
"messages": [{"role": "user", "content": prompt}],
"stream": False,
"max_tokens": 1024,
"max_tokens": 2048,
"temperature": 0.7
}
@@ -118,7 +118,7 @@ class LLMResponseGenerator:
"model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"messages": [{"role": "user", "content": prompt_check}],
"stream": False,
"max_tokens": 1024,
"max_tokens": 2048,
"temperature": 0.7
}
@@ -210,13 +210,13 @@ class LLMResponseGenerator:
return await self._generate_base_response(
message,
"deepseek-reasoner",
{"temperature": 0.7, "max_tokens": 1024}
{"temperature": 0.7, "max_tokens": 2048}
)
else:
return await self._generate_base_response(
message,
"Pro/deepseek-ai/DeepSeek-R1",
{"temperature": 0.7, "max_tokens": 1024}
{"temperature": 0.7, "max_tokens": 2048}
)
async def _generate_v3_response(self, message: Message) -> Optional[str]:
@@ -225,13 +225,13 @@ class LLMResponseGenerator:
return await self._generate_base_response(
message,
"deepseek-chat",
{"temperature": 0.8, "max_tokens": 1024}
{"temperature": 0.8, "max_tokens": 2048}
)
else:
return await self._generate_base_response(
message,
"Pro/deepseek-ai/DeepSeek-V3",
{"temperature": 0.8, "max_tokens": 1024}
{"temperature": 0.8, "max_tokens": 2048}
)
async def _generate_r1_distill_response(self, message: Message) -> Optional[str]:
@@ -239,7 +239,7 @@ class LLMResponseGenerator:
return await self._generate_base_response(
message,
"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
{"temperature": 0.7, "max_tokens": 1024}
{"temperature": 0.7, "max_tokens": 2048}
)
async def _get_group_chat_context(self, message: Message) -> str:
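All of these hunks make the same change: the completion budget goes from 1024 to 2048 tokens, both in the raw request payloads and in the per-model _generate_base_response calls. A sketch of the payload shape they share, using a hypothetical build_payload helper; the actual request code and endpoint are not shown in this diff:

def build_payload(model_name: str, prompt: str,
                  max_tokens: int = 2048, temperature: float = 0.7) -> dict:
    """Assemble the chat-completion request body used by the generator."""
    return {
        "model": model_name,
        "messages": [{"role": "user", "content": prompt}],
        "stream": False,
        "max_tokens": max_tokens,   # raised from 1024 in this commit
        "temperature": temperature,
    }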

View File

@@ -171,7 +171,7 @@ class PromptBuilder:
#额外信息要求
- extra_info = '''但是记得回复平淡一些,简短一些,记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只需要输出回复内容就好,不要输出其他任何内容'''
+ extra_info = '''但是记得回复平淡一些,简短一些,尤其注意在没明确提到时不要过多提及自身的背景, 记住不要输出多余内容(包括前后缀,冒号和引号,括号,表情等)只需要输出回复内容就好,不要输出其他任何内容'''
@@ -188,9 +188,9 @@ class PromptBuilder:
prompt += f"{prompt_ger}\n"
prompt += f"{extra_info}\n"
- activate_prompt_check=f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。"
+ activate_prompt_check=f"以上是群里正在进行的聊天,昵称为 '{sender_name}' 的用户说的:{message_txt}。引起了你的注意,你和他{relation_prompt},你想要{relation_prompt_2},但是这不一定是合适的时机,请你决定是否要回应这条消息。不要太受现在做的事情影响,因为摸鱼是很正常的。"
prompt_personality_check = ''
extra_check_info=f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息一定要回复如果是刚刚理会过的人发送消息且还在与那个人对话中的话一定要回复其他话题如果合适搭话也可以回复如果认为应该回复请输出yes否则输出no请注意是决定是否需要回复而不是编写回复内容除了yes和no不要输出任何回复内容。"
extra_check_info=f"请注意把握群里的聊天内容的基础上,综合群内的氛围,例如,和{global_config.BOT_NICKNAME}相关的话题要积极回复,如果是at自己的消息,无论如何一定要回复如果是刚刚理会过的人发送消息且还在与那个人对话中的话一定要回复其他话题如果合适搭话也可以回复如果认为应该回复请输出yes否则输出no请注意是决定是否需要回复而不是编写回复内容除了yes和no不要输出任何回复内容。"
if personality_choice < 4/6: # 第一种人格
prompt_personality_check = f'''你的网名叫{global_config.BOT_NICKNAME}{personality[0]},{promt_info_prompt} {activate_prompt_check} {extra_check_info}'''
elif personality_choice < 1: # 第二种人格
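The check prompt asks the model to answer only yes or no on whether to reply at all. A minimal sketch of how that answer might be interpreted downstream; the should_reply helper is hypothetical and not part of this diff:

def should_reply(check_response: str) -> bool:
    """Treat anything starting with 'yes' as a decision to reply."""
    return check_response.strip().lower().startswith("yes")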

View File

@@ -69,7 +69,7 @@ class TopicIdentifier:
'', '按照', '', '', '', '比如', '', '除了', '', '', '对于',
'根据', '关于', '', '', '', '', '经过', '', '', '', '通过',
'', '', '', '为了', '围绕', '', '', '由于', '', '', '沿', '沿着',
- '', '依照', '', '', '因为', '', '', '', '', '自从'
+ '', '依照', '', '', '因为', '', '', '', '', '自从','[]'
}
# 过滤掉停用词和标点符号,只保留名词和动词
@@ -78,7 +78,7 @@ class TopicIdentifier:
if word not in stop_words and not word.strip() in {
'', '', '', '', '', '', '', '"', '"', ''', ''',
'', '', '', '', '', '', '', '', '·', '', '~',
- '', '+', '=', '-'
+ '', '+', '=', '-','[',']'
}:
filtered_words.append(word)
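Both filter sets now also drop square brackets, so bracketed message fragments (media placeholders and similar) never become topic candidates. A sketch of the filtering step, assuming jieba-style word segmentation; everything outside the filtering condition is an assumption, not code from this repository:

import jieba

def filter_topic_words(text: str, stop_words: set, punctuation: set) -> list:
    """Segment text and drop stop words, punctuation and bracketed tokens."""
    filtered_words = []
    for word in jieba.cut(text):
        w = word.strip()
        # the commit additionally drops '[' and ']' so bracketed fragments
        # are never kept as topic words
        if w and w not in stop_words and w not in punctuation \
                and '[' not in w and ']' not in w:
            filtered_words.append(w)
    return filtered_words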

View File

@@ -61,7 +61,7 @@ class WillingManager:
if is_mentioned_bot and user_id == int(964959351):
reply_probability = 1
- return reply_probability
+ return reply_probability+0.5
def change_reply_willing_sent(self, group_id: int):
"""开始思考后降低群组的回复意愿"""
@@ -72,7 +72,7 @@ class WillingManager:
"""发送消息后提高群组的回复意愿"""
current_willing = self.group_reply_willing.get(group_id, 0)
if current_willing < 1:
- self.group_reply_willing[group_id] = min(1, current_willing + 0.3)
+ self.group_reply_willing[group_id] = min(2, current_willing + 0.8)
async def ensure_started(self):
"""确保衰减任务已启动"""

View File

@@ -192,7 +192,7 @@ class Hippocampus:
chat_text.append(chat_)
return chat_text
- def build_memory(self,chat_size=12):
+ async def build_memory(self,chat_size=12):
#最近消息获取频率
time_frequency = {'near':1,'mid':2,'far':2}
memory_sample = self.get_memory_sample(chat_size,time_frequency)
@@ -211,10 +211,12 @@ class Hippocampus:
first_memory = set()
first_memory = self.memory_compress(input_text, 2.5)
# 延时防止访问超频
- # time.sleep(5)
+ # time.sleep(60)
#将记忆加入到图谱中
for topic, memory in first_memory:
topics = segment_text(topic)
+ if '[' in topic or topic=='':
+ continue
print(f"\033[1;34m话题\033[0m: {topic},节点: {topics}, 记忆: {memory}")
for split_topic in topics:
self.memory_graph.add_dot(split_topic,memory)
@@ -240,6 +242,8 @@ class Hippocampus:
# print(topics)
compressed_memory = set()
for topic in topics:
+ if topic=='' or '[' in topic:
+ continue
topic_what_prompt = topic_what(input_text,topic)
topic_what_response = self.llm_model_small.generate_response(topic_what_prompt)
compressed_memory.add((topic.strip(), topic_what_response[0])) # 将话题和记忆作为元组存储
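Putting the Hippocampus changes together: build_memory is now a coroutine, the commented-out throttle points at 60 seconds instead of 5, and both the graph-insertion loop and memory_compress skip empty or bracketed topics. A condensed, abridged sketch of the resulting flow; the helpers it calls (get_memory_sample, memory_compress, segment_text, memory_graph.add_dot) are the ones quoted in the diff, while the overall method body below is an approximation:

# method of Hippocampus (sketch, abridged)
async def build_memory(self, chat_size: int = 12):
    """Sample recent chats, compress them, and add filtered topics to the graph."""
    time_frequency = {'near': 1, 'mid': 2, 'far': 2}
    memory_sample = self.get_memory_sample(chat_size, time_frequency)
    for input_text in memory_sample:
        first_memory = self.memory_compress(input_text, 2.5)
        for topic, memory in first_memory:
            # added in this commit: skip empty or bracketed topics
            if '[' in topic or topic == '':
                continue
            for split_topic in segment_text(topic):
                self.memory_graph.add_dot(split_topic, memory)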

View File

@@ -278,6 +278,7 @@ def main():
#将记忆加入到图谱中
for topic, memory in first_memory:
+ # continue
topics = segment_text(topic)
print(f"\033[1;34m话题\033[0m: {topic},节点: {topics}, 记忆: {memory}")
for split_topic in topics: