feat: make 心流 (heartflow) an experimental feature
@@ -51,7 +51,10 @@ async def start_think_flow():
     try:
         outer_world_task = asyncio.create_task(outer_world.open_eyes())
         logger.success("大脑和外部世界启动成功")
-        return outer_world_task
+        # 启动心流系统
+        heartflow_task = asyncio.create_task(subheartflow_manager.heartflow_start_working())
+        logger.success("心流系统启动成功")
+        return outer_world_task, heartflow_task
     except Exception as e:
         logger.error(f"启动大脑和外部世界失败: {e}")
         raise
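With this change start_think_flow() schedules the heartflow worker alongside the outer-world task and returns both. Below is a minimal, illustrative sketch of a caller that keeps references to the two tasks and waits on them together; it is not from the commit (the actual call site in the next hunk simply awaits start_think_flow() without using the return value).

    import asyncio

    # Hypothetical caller, for illustration only. start_think_flow is the
    # coroutine modified above; its import path is not shown in this diff.
    async def run_think_flow():
        outer_world_task, heartflow_task = await start_think_flow()
        # asyncio.gather waits on both tasks and propagates the first exception
        # raised by either of them.
        await asyncio.gather(outer_world_task, heartflow_task)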
@@ -70,12 +73,10 @@ async def start_background_tasks():
     logger.success("情绪管理器启动成功")

     # 启动大脑和外部世界
-    await start_think_flow()
-
-    # 启动心流系统
-    heartflow_task = asyncio.create_task(subheartflow_manager.heartflow_start_working())
-    logger.success("心流系统启动成功")
+    if global_config.enable_think_flow:
+        logger.success("启动测试功能:心流系统")
+        await start_think_flow()

     # 只启动表情包管理任务
     asyncio.create_task(emoji_manager.start_periodic_check())
     await bot_schedule.initialize()
@@ -91,7 +91,9 @@ class ChatBot:
         )
         message.update_chat_stream(chat)

+
         #创建 心流 观察
-        await outer_world.check_and_add_new_observe()
-        subheartflow_manager.create_subheartflow(chat.stream_id)
+        if global_config.enable_think_flow:
+            await outer_world.check_and_add_new_observe()
+            subheartflow_manager.create_subheartflow(chat.stream_id)

@@ -142,10 +144,14 @@ class ChatBot:
             interested_rate=interested_rate,
             sender_id=str(message.message_info.user_info.user_id),
         )

-        current_willing_old = willing_manager.get_willing(chat_stream=chat)
-        current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing-5)/4
-        print(f"旧回复意愿:{current_willing_old},新回复意愿:{current_willing_new}")
-        current_willing = (current_willing_old + current_willing_new) / 2
+        if global_config.enable_think_flow:
+            current_willing_old = willing_manager.get_willing(chat_stream=chat)
+            current_willing_new = (subheartflow_manager.get_subheartflow(chat.stream_id).current_state.willing-5)/4
+            print(f"旧回复意愿:{current_willing_old},新回复意愿:{current_willing_new}")
+            current_willing = (current_willing_old + current_willing_new) / 2
+        else:
+            current_willing = willing_manager.get_willing(chat_stream=chat)
+
         logger.info(
             f"[{current_time}][{chat.group_info.group_name if chat.group_info else '私聊'}]"
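The gated branch above mixes two reply-willingness signals: the existing willing_manager value and the sub-heartflow willing rescaled as (willing - 5) / 4, then averaged. A minimal sketch of that blend in isolation, assuming the sub-heartflow willing sits on roughly a 1–9 scale so the rescaled value lands near [-1, 1]; the actual value ranges are not shown in this diff, and the helper name is hypothetical.

    # Hypothetical helper mirroring the enable_think_flow branch above.
    def blend_willing(old_willing: float, heartflow_willing: float) -> float:
        # Rescale the heartflow willing exactly as the diff does: (w - 5) / 4.
        new_willing = (heartflow_willing - 5) / 4
        # Average the old and rescaled signals, as in the gated branch.
        return (old_willing + new_willing) / 2

    # Example: old willing 0.3, heartflow willing 7 -> (0.3 + 0.5) / 2 = 0.4
    print(blend_willing(0.3, 7))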
@@ -185,13 +191,16 @@ class ChatBot:
         # print(f"response: {response}")
         if response:
             stream_id = message.chat_stream.stream_id
-
-            chat_talking_prompt = ""
-            if stream_id:
-                chat_talking_prompt = get_recent_group_detailed_plain_text(
-                    stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
-                )
-            await subheartflow_manager.get_subheartflow(stream_id).do_after_reply(response,chat_talking_prompt)
+
+            if global_config.enable_think_flow:
+                chat_talking_prompt = ""
+                if stream_id:
+                    chat_talking_prompt = get_recent_group_detailed_plain_text(
+                        stream_id, limit=global_config.MAX_CONTEXT_SIZE, combine=True
+                    )
+
+                await subheartflow_manager.get_subheartflow(stream_id).do_after_reply(response,chat_talking_prompt)
+
             # print(f"有response: {response}")
             container = message_manager.get_container(chat.stream_id)
             thinking_message = None
@@ -130,6 +130,8 @@ class BotConfig:

    # 实验性
    llm_outer_world: Dict[str, str] = field(default_factory=lambda: {})
+   llm_sub_heartflow: Dict[str, str] = field(default_factory=lambda: {})
+   llm_heartflow: Dict[str, str] = field(default_factory=lambda: {})


    @staticmethod
@@ -265,6 +267,8 @@ class BotConfig:
            "embedding",
            "moderation",
            "llm_outer_world",
+           "llm_sub_heartflow",
+           "llm_heartflow",
        ]

        for item in config_list:
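The two new entries extend config_list so the loader can pick up the matching [model.*] tables added to the TOML template later in this commit. A hedged sketch of how such a loop could copy each table into the corresponding BotConfig dict field, assuming a tomllib-style parse; the real loader's file name, field mapping, and error handling are not shown in this hunk.

    import tomllib  # Python 3.11+; assumption, the project may use another TOML parser

    # Illustrative only: copy each [model.<item>] table into a plain dict,
    # matching the Dict[str, str] fields declared on BotConfig above.
    with open("bot_config.toml", "rb") as f:  # hypothetical file name
        raw = tomllib.load(f)

    config_list = ["llm_outer_world", "llm_sub_heartflow", "llm_heartflow"]
    model_settings = {}
    for item in config_list:
        model_settings[item] = raw.get("model", {}).get(item, {})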
@@ -37,7 +37,10 @@ class PromptBuilder:
        )

        # outer_world_info = outer_world.outer_world_info
-       current_mind_info = subheartflow_manager.get_subheartflow(stream_id).current_mind
+       if global_config.enable_think_flow:
+           current_mind_info = subheartflow_manager.get_subheartflow(stream_id).current_mind
+       else:
+           current_mind_info = ""

        relation_prompt = ""
        for person in who_chat_in_group:
@@ -122,7 +122,7 @@ class MoodManager:
        time_diff = current_time - self.last_update

        # Valence 向中性(0)回归
-       valence_target = -0.2
+       valence_target = 0
        self.current_mood.valence = valence_target + (self.current_mood.valence - valence_target) * math.exp(
            -self.decay_rate_valence * time_diff
        )
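The changed line moves the valence attraction point from -0.2 to 0, so the exponential relaxation in the surrounding lines now decays valence toward true neutral: v_new = target + (v - target) * exp(-k * dt). A minimal standalone sketch of that update rule, with an illustrative decay rate; the actual decay_rate_valence value is not part of this diff.

    import math

    # Standalone illustration of the valence decay step shown above.
    def decay_valence(valence: float, time_diff: float,
                      valence_target: float = 0, decay_rate: float = 0.1) -> float:
        # Exponential relaxation toward the target; with target 0 this is plain decay.
        return valence_target + (valence - valence_target) * math.exp(-decay_rate * time_diff)

    # Example: valence 0.8 decays toward 0 as time passes (10 s at rate 0.1 -> ~0.294).
    print(decay_valence(0.8, time_diff=10.0))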
@@ -21,7 +21,7 @@ class SubHeartflow:
        self.current_mind = ""
        self.past_mind = []
        self.current_state : CuttentState = CuttentState()
-       self.llm_model = LLM_request(model=global_config.llm_topic_judge, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
+       self.llm_model = LLM_request(model=global_config.llm_sub_heartflow, temperature=0.7, max_tokens=600, request_type="sub_heart_flow")
        self.outer_world = None

        self.main_heartflow_info = ""
@@ -21,7 +21,7 @@ class Heartflow:
        self.current_mind = "你什么也没想"
        self.past_mind = []
        self.current_state : CuttentState = CuttentState()
-       self.llm_model = LLM_request(model=global_config.llm_topic_judge, temperature=0.6, max_tokens=1000, request_type="heart_flow")
+       self.llm_model = LLM_request(model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow")

        self._subheartflows = {}
        self.active_subheartflows_nums = 0
@@ -132,7 +132,7 @@ enable = true

[experimental]
enable_friend_chat = false # 是否启用好友聊天
-enable_thinkflow = false # 是否启用思维流
+enable_think_flow = false # 是否启用思维流

#下面的模型若使用硅基流动则不需要更改,使用ds官方则改成.env.prod自定义的宏,使用自定义模型则选择定位相似的模型自己填写
#推理模型
@@ -203,3 +203,17 @@ name = "Qwen/Qwen2.5-7B-Instruct"
provider = "SILICONFLOW"
pri_in = 0
pri_out = 0
+
+[model.llm_sub_heartflow] #心流:建议使用qwen2.5 7b
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26
+
+[model.llm_heartflow] #心流:建议使用qwen2.5 32b
+# name = "Pro/Qwen/Qwen2.5-7B-Instruct"
+name = "Qwen/Qwen2.5-32B-Instruct"
+provider = "SILICONFLOW"
+pri_in = 1.26
+pri_out = 1.26