fix ruff
@@ -213,7 +213,8 @@ class BotConfig:
             schedule_config = parent["schedule"]
             config.ENABLE_SCHEDULE_GEN = schedule_config.get("enable_schedule_gen", config.ENABLE_SCHEDULE_GEN)
             config.PROMPT_SCHEDULE_GEN = schedule_config.get("prompt_schedule_gen", config.PROMPT_SCHEDULE_GEN)
-            logger.info(f"载入自定义日程prompt:{schedule_config.get('prompt_schedule_gen', config.PROMPT_SCHEDULE_GEN)}")
+            logger.info(
+                f"载入自定义日程prompt:{schedule_config.get('prompt_schedule_gen', config.PROMPT_SCHEDULE_GEN)}")

         def emoji(parent: dict):
             emoji_config = parent["emoji"]
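Each loader above follows the same fallback pattern: read one TOML table from `parent` and keep the hard-coded default whenever a key is absent, since `dict.get(key, default)` never raises. A minimal self-contained sketch of that pattern (the `Config` dataclass and TOML data below are illustrative assumptions, not code from the repo):

from dataclasses import dataclass

@dataclass
class Config:
    ENABLE_SCHEDULE_GEN: bool = False
    PROMPT_SCHEDULE_GEN: str = "default prompt"

config = Config()
parent = {"schedule": {"enable_schedule_gen": True}}  # stands in for parsed TOML

schedule_config = parent["schedule"]
# Absent keys fall back to the current attribute value, so defaults survive.
config.ENABLE_SCHEDULE_GEN = schedule_config.get("enable_schedule_gen", config.ENABLE_SCHEDULE_GEN)
config.PROMPT_SCHEDULE_GEN = schedule_config.get("prompt_schedule_gen", config.PROMPT_SCHEDULE_GEN)
assert config.ENABLE_SCHEDULE_GEN is True
assert config.PROMPT_SCHEDULE_GEN == "default prompt"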
@@ -247,10 +248,13 @@ class BotConfig:
             config.willing_mode = willing_config.get("willing_mode", config.willing_mode)

             if config.INNER_VERSION in SpecifierSet(">=0.0.11"):
-                config.response_willing_amplifier = willing_config.get("response_willing_amplifier", config.response_willing_amplifier)
-                config.response_interested_rate_amplifier = willing_config.get("response_interested_rate_amplifier", config.response_interested_rate_amplifier)
+                config.response_willing_amplifier = willing_config.get(
+                    "response_willing_amplifier", config.response_willing_amplifier)
+                config.response_interested_rate_amplifier = willing_config.get(
+                    "response_interested_rate_amplifier", config.response_interested_rate_amplifier)
                 config.down_frequency_rate = willing_config.get("down_frequency_rate", config.down_frequency_rate)
-                config.emoji_response_penalty = willing_config.get("emoji_response_penalty", config.emoji_response_penalty)
+                config.emoji_response_penalty = willing_config.get(
+                    "emoji_response_penalty", config.emoji_response_penalty)

         def model(parent: dict):
             # 加载模型配置
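The `SpecifierSet(">=0.0.11")` guard above is the `packaging` library's PEP 440 specifier matching: testing a version with `in` gates the newer willing-amplifier keys so they are only read from sufficiently new config files. A standalone check of that mechanism:

from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=0.0.11")
# Membership testing accepts Version objects as well as plain version strings.
assert Version("0.0.12") in spec
assert "0.0.10" not in spec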
@@ -392,9 +396,11 @@ class BotConfig:

         def response_spliter(parent: dict):
             response_spliter_config = parent["response_spliter"]
-            config.enable_response_spliter = response_spliter_config.get("enable_response_spliter", config.enable_response_spliter)
+            config.enable_response_spliter = response_spliter_config.get(
+                "enable_response_spliter", config.enable_response_spliter)
             config.response_max_length = response_spliter_config.get("response_max_length", config.response_max_length)
-            config.response_max_sentence_num = response_spliter_config.get("response_max_sentence_num", config.response_max_sentence_num)
+            config.response_max_sentence_num = response_spliter_config.get(
+                "response_max_sentence_num", config.response_max_sentence_num)

         def groups(parent: dict):
             groups_config = parent["groups"]
@@ -405,7 +411,8 @@ class BotConfig:
         def experimental(parent: dict):
             experimental_config = parent["experimental"]
             config.enable_friend_chat = experimental_config.get("enable_friend_chat", config.enable_friend_chat)
+            config.enable_think_flow = experimental_config.get("enable_think_flow", config.enable_think_flow)

         # 版本表达式:>=1.0.0,<2.0.0
         # 允许字段:func: method, support: str, notice: str, necessary: bool
         # 如果使用 notice 字段,在该组配置加载时,会展示该字段对用户的警示
@@ -13,7 +13,6 @@ from .relationship_manager import relationship_manager
 from src.common.logger import get_module_logger

 from src.think_flow_demo.heartflow import subheartflow_manager
-from src.think_flow_demo.outer_world import outer_world

 logger = get_module_logger("prompt")

@@ -58,9 +57,9 @@ class PromptBuilder:
         mood_prompt = mood_manager.get_prompt()

         # 日程构建
-        current_date = time.strftime("%Y-%m-%d", time.localtime())
-        current_time = time.strftime("%H:%M:%S", time.localtime())
-        bot_schedule_now_time, bot_schedule_now_activity = bot_schedule.get_current_task()
+        # current_date = time.strftime("%Y-%m-%d", time.localtime())
+        # current_time = time.strftime("%H:%M:%S", time.localtime())
+        # bot_schedule_now_time, bot_schedule_now_activity = bot_schedule.get_current_task()

         # 获取聊天上下文
         chat_in_group = True
@@ -122,11 +122,12 @@ class RelationshipManager:
                 relationship.relationship_value = float(relationship.relationship_value.to_decimal())
             else:
                 relationship.relationship_value = float(relationship.relationship_value)
-            logger.info(f"[关系管理] 用户 {user_id}({platform}) 的关系值已转换为double类型: {relationship.relationship_value}")
+            logger.info(
+                f"[关系管理] 用户 {user_id}({platform}) 的关系值已转换为double类型: {relationship.relationship_value}")  # noqa: E501
         except (ValueError, TypeError):
             # 如果不能解析/强转则将relationship.relationship_value设置为double类型的0
             relationship.relationship_value = 0.0
-            logger.warning(f"[关系管理] 用户 {user_id}({platform}) 的关系值无法转换为double类型,已设置为0")
+            logger.warning(f"[关系管理] 用户 {user_id}({platform}) 的无法转换为double类型,已设置为0")
         relationship.relationship_value += value
         await self.storage_relationship(relationship)
         relationship.saved = True
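For context on the `to_decimal()` branch kept above: assuming `relationship_value` comes back from MongoDB as a bson `Decimal128`, it must first be converted to a `decimal.Decimal` before `float()` accepts it, while values already stored as plain numbers take the `else` branch. A small sketch:

from bson.decimal128 import Decimal128

stored = Decimal128("42.5")            # value as MongoDB might return it
value = float(stored.to_decimal())     # Decimal128 -> decimal.Decimal -> float
assert value == 42.5
assert float(7) == 7.0                 # plain int/float values cast directly

The `# noqa: E501` appended to the wrapped log line suppresses ruff's line-length rule for that one line, since the f-string stays long even after the call is split.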
@@ -21,7 +21,8 @@ class SubHeartflow:
         self.current_mind = ""
         self.past_mind = []
         self.current_state : CuttentState = CuttentState()
-        self.llm_model = LLM_request(model=global_config.llm_sub_heartflow, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
+        self.llm_model = LLM_request(
+            model=global_config.llm_sub_heartflow, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
         self.outer_world = None

         self.main_heartflow_info = ""
@@ -52,15 +53,15 @@ class SubHeartflow:
         related_memory_info = 'memory'
         message_stream_info = self.outer_world.talking_summary

-        prompt = f""
+        prompt = ""
         # prompt += f"麦麦的总体想法是:{self.main_heartflow_info}\n\n"
         prompt += f"{personality_info}\n"
         prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{message_stream_info}\n"
         prompt += f"你想起来{related_memory_info}。"
         prompt += f"刚刚你的想法是{current_thinking_info}。"
         prompt += f"你现在{mood_info}。"
-        prompt += f"现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:"
+        prompt += "现在你接下去继续思考,产生新的想法,不要分点输出,输出连贯的内心独白,不要太长,"
+        prompt += "但是记得结合上述的消息,要记得维持住你的人设,关注聊天和新内容,不要思考太多:"
         reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)

         self.update_current_mind(reponse)
@@ -80,7 +81,7 @@ class SubHeartflow:
         message_new_info = chat_talking_prompt
         reply_info = reply_content

-        prompt = f""
+        prompt = ""
         prompt += f"{personality_info}\n"
         prompt += f"现在你正在上网,和qq群里的网友们聊天,群里正在聊的话题是:{message_stream_info}\n"
         prompt += f"你想起来{related_memory_info}。"
@@ -88,7 +89,8 @@ class SubHeartflow:
         prompt += f"你现在看到了网友们发的新消息:{message_new_info}\n"
         prompt += f"你刚刚回复了群友们:{reply_info}"
         prompt += f"你现在{mood_info}。"
-        prompt += f"现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白,不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,以及你回复的内容,不要思考太多:"
+        prompt += "现在你接下去继续思考,产生新的想法,记得保留你刚刚的想法,不要分点输出,输出连贯的内心独白"
+        prompt += "不要太长,但是记得结合上述的消息,要记得你的人设,关注聊天和新内容,以及你回复的内容,不要思考太多:"

         reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)

@@ -103,13 +105,13 @@ class SubHeartflow:
         current_thinking_info = self.current_mind
         mood_info = self.current_state.mood
         # print("麦麦闹情绪了2")
-        prompt = f""
+        prompt = ""
         prompt += f"{personality_info}\n"
-        prompt += f"现在你正在上网,和qq群里的网友们聊天"
+        prompt += "现在你正在上网,和qq群里的网友们聊天"
         prompt += f"你现在的想法是{current_thinking_info}。"
         prompt += f"你现在{mood_info}。"
-        prompt += f"现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
-        prompt += f"请你用<>包裹你的回复意愿,例如输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"
+        prompt += "现在请你思考,你想不想发言或者回复,请你输出一个数字,1-10,1表示非常不想,10表示非常想。"
+        prompt += "请你用<>包裹你的回复意愿,输出<1>表示不想回复,输出<10>表示非常想回复。请你考虑,你完全可以不回复"

         response, reasoning_content = await self.llm_model.generate_response_async(prompt)
         # 解析willing值
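The `prompt = f""` → `prompt = ""` edits above (and the dropped `f` prefixes on constant fragments) address ruff's F541, the rule against f-strings with no placeholders: without any `{}` fields the prefix does nothing, so the two forms are identical at runtime. A quick illustration:

# F541: an f-string literal with no {} placeholders is just a plain string.
assert f"你好" == "你好"          # the f prefix is inert here; ruff says drop it

prompt = ""                        # post-fix form used above
prompt += "请你输出一个数字,1-10"   # constant fragment: no f prefix needed
prompt += f"当前值:{1 + 1}"        # a real placeholder still warrants f
assert prompt.endswith("2")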
@@ -2,7 +2,6 @@ from .current_mind import SubHeartflow
 from src.plugins.moods.moods import MoodManager
 from src.plugins.models.utils_model import LLM_request
 from src.plugins.chat.config import global_config
-from .outer_world import outer_world
 import asyncio

 class CuttentState:
@@ -21,7 +20,8 @@ class Heartflow:
         self.current_mind = "你什么也没想"
         self.past_mind = []
         self.current_state : CuttentState = CuttentState()
-        self.llm_model = LLM_request(model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow")
+        self.llm_model = LLM_request(
+            model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow")

         self._subheartflows = {}
         self.active_subheartflows_nums = 0
@@ -50,7 +50,8 @@ class Heartflow:
         prompt += f"刚刚你的主要想法是{current_thinking_info}。"
         prompt += f"你还有一些小想法,因为你在参加不同的群聊天,是你正在做的事情:{sub_flows_info}\n"
         prompt += f"你现在{mood_info}。"
-        prompt += f"现在你接下去继续思考,产生新的想法,但是要基于原有的主要想法,不要分点输出,输出连贯的内心独白,不要太长,但是记得结合上述的消息,关注新内容:"
+        prompt += "现在你接下去继续思考,产生新的想法,但是要基于原有的主要想法,不要分点输出,"
+        prompt += "输出连贯的内心独白,不要太长,但是记得结合上述的消息,关注新内容:"

         reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)

@@ -84,7 +85,8 @@ class Heartflow:
         prompt += f"现在麦麦的想法是:{self.current_mind}\n"
         prompt += f"现在麦麦在qq群里进行聊天,聊天的话题如下:{minds_str}\n"
         prompt += f"你现在{mood_info}\n"
-        prompt += f"现在请你总结这些聊天内容,注意关注聊天内容对原有的想法的影响,输出连贯的内心独白,不要太长,但是记得结合上述的消息,要记得你的人设,关注新内容:"
+        prompt += '''现在请你总结这些聊天内容,注意关注聊天内容对原有的想法的影响,输出连贯的内心独白
+不要太长,但是记得结合上述的消息,要记得你的人设,关注新内容:'''

         reponse, reasoning_content = await self.llm_model.generate_response_async(prompt)

@@ -3,7 +3,6 @@ import asyncio
 from datetime import datetime
 from src.plugins.models.utils_model import LLM_request
 from src.plugins.chat.config import global_config
-import sys
 from src.common.database import db

 #存储一段聊天的大致内容
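The deleted imports here and in the heartflow hunk above (`import sys`, `from .outer_world import outer_world`) are ruff F401 fixes: modules imported but never referenced anywhere in the file. `ruff check --fix` removes such imports automatically; a minimal reproduction (the file contents are illustrative):

# demo.py -- `ruff check demo.py` reports F401 for the unused import:
import sys                      # F401: `sys` imported but unused
from datetime import datetime   # referenced below, so no warning

print(datetime.now())
# `ruff check --fix demo.py` deletes the `import sys` line for you.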
@@ -19,7 +18,8 @@ class Talking_info:

         self.oberve_interval = 3

-        self.llm_summary = LLM_request(model=global_config.llm_outer_world, temperature=0.7, max_tokens=300, request_type="outer_world")
+        self.llm_summary = LLM_request(
+            model=global_config.llm_outer_world, temperature=0.7, max_tokens=300, request_type="outer_world")

     async def start_observe(self):
         while True:
@@ -73,8 +73,9 @@ class Talking_info:
         prompt = ""
         prompt = f"你正在参与一个qq群聊的讨论,这个群之前在聊的内容是:{self.talking_summary}\n"
         prompt += f"现在群里的群友们产生了新的讨论,有了新的发言,具体内容如下:{self.talking_message_str}\n"
-        prompt += f"以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n"
-        prompt += f"总结概括:"
+        prompt += '''以上是群里在进行的聊天,请你对这个聊天内容进行总结,总结内容要包含聊天的大致内容,
+以及聊天中的一些重要信息,记得不要分点,不要太长,精简的概括成一段文本\n'''
+        prompt += "总结概括:"
         self.talking_summary, reasoning_content = await self.llm_summary.generate_response_async(prompt)

     def translate_message_list_to_str(self):
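One subtlety in the wrap above: turning a one-line prompt into a `'''...'''` literal that spans two source lines bakes a real newline (plus any indentation on the second line) into the prompt text, so the string handed to the model is no longer byte-for-byte what it was. Implicit concatenation of adjacent literals inside parentheses would have preserved it exactly; a sketch of the difference:

# Triple-quoted: the source line break becomes part of the string.
a = '''以上是群里在进行的聊天,
精简的概括成一段文本'''

# Adjacent literals in parentheses concatenate with no newline inserted.
b = ("以上是群里在进行的聊天,"
     "精简的概括成一段文本")

assert "\n" in a and "\n" not in b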
@@ -94,7 +95,8 @@ class OuterWorld:
         self.outer_world_info = ""
         self.start_time = int(datetime.now().timestamp())

-        self.llm_summary = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=600, request_type="outer_world_info")
+        self.llm_summary = LLM_request(
+            model=global_config.llm_outer_world, temperature=0.7, max_tokens=600, request_type="outer_world_info")

     async def check_and_add_new_observe(self):
         # 获取所有聊天流
@@ -41,7 +41,7 @@ personality_2_probability = 0.2 # 第二种人格出现概率,可以为0
 personality_3_probability = 0.1 # 第三种人格出现概率,请确保三个概率相加等于1

 [schedule]
-enable_schedule_gen = true # 是否启用日程表
+enable_schedule_gen = true # 是否启用日程表(尚未完成)
 prompt_schedule_gen = "用几句话描述描述性格特点或行动规律,这个特征会用来生成日程表"

 [message]