diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py
index 2e0c0eb1f..8179b57ce 100644
--- a/src/plugins/chat/llm_generator.py
+++ b/src/plugins/chat/llm_generator.py
@@ -76,30 +76,31 @@ class ResponseGenerator:
         self, message: MessageThinking, model: LLM_request
     ) -> Optional[str]:
         """使用指定的模型生成回复"""
-        sender_name = (
-            message.chat_stream.user_info.user_nickname
-            or f"用户{message.chat_stream.user_info.user_id}"
-        )
-        if message.chat_stream.user_info.user_cardname:
+        sender_name = ""
+        if message.chat_stream.user_info.user_cardname and message.chat_stream.user_info.user_nickname:
             sender_name = f"[({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}]{message.chat_stream.user_info.user_cardname}"
+        elif message.chat_stream.user_info.user_nickname:
+            sender_name = f"({message.chat_stream.user_info.user_id}){message.chat_stream.user_info.user_nickname}"
+        else:
+            sender_name = f"用户({message.chat_stream.user_info.user_id})"
 
-        # 获取关系值
-        relationship_value = (
-            relationship_manager.get_relationship(
-                message.chat_stream
-            ).relationship_value
-            if relationship_manager.get_relationship(message.chat_stream)
-            else 0.0
-        )
-        if relationship_value != 0.0:
-            # print(f"\033[1;32m[关系管理]\033[0m 回复中_当前关系值: {relationship_value}")
-            pass
+        # # 获取关系值
+        # relationship_value = (
+        #     relationship_manager.get_relationship(
+        #         message.chat_stream
+        #     ).relationship_value
+        #     if relationship_manager.get_relationship(message.chat_stream)
+        #     else 0.0
+        # )
+        # if relationship_value != 0.0:
+        #     # print(f"\033[1;32m[关系管理]\033[0m 回复中_当前关系值: {relationship_value}")
+        #     pass
 
         # 构建prompt
         prompt, prompt_check = await prompt_builder._build_prompt(
+            message.chat_stream,
             message_txt=message.processed_plain_text,
             sender_name=sender_name,
-            relationship_value=relationship_value,
             stream_id=message.chat_stream.stream_id,
         )
 
diff --git a/src/plugins/chat/prompt_builder.py b/src/plugins/chat/prompt_builder.py
index a41ed51e2..d9f1970a8 100644
--- a/src/plugins/chat/prompt_builder.py
+++ b/src/plugins/chat/prompt_builder.py
@@ -17,34 +17,65 @@ class PromptBuilder:
         self.prompt_built = ''
         self.activate_messages = ''
-
-
-    async def _build_prompt(self,
-                            message_txt: str,
-                            sender_name: str = "某人",
-                            relationship_value: float = 0.0,
-                            stream_id: Optional[int] = None) -> tuple[str, str]:
+    async def _build_prompt(self,
+                            chat_stream,
+                            message_txt: str,
+                            sender_name: str = "某人",
+                            stream_id: Optional[int] = None) -> tuple[str, str]:
         """构建prompt
 
         Args:
             message_txt: 消息文本
             sender_name: 发送者昵称
-            relationship_value: 关系值
+            # relationship_value: 关系值
             group_id: 群组ID
 
         Returns:
             str: 构建好的prompt
         """
-        # 先禁用关系
-        if 0 > 30:
-            relation_prompt = "关系特别特别好,你很喜欢喜欢他"
-            relation_prompt_2 = "热情发言或者回复"
-        elif 0 < -20:
-            relation_prompt = "关系很差,你很讨厌他"
-            relation_prompt_2 = "骂他"
-        else:
-            relation_prompt = "关系一般"
-            relation_prompt_2 = "发言或者回复"
+        # 关系
+        relationship_level = ["厌恶", "冷漠", "一般", "友好", "喜欢", "爱慕"]
+        # position_attitude_list = ["反驳", "中立", "支持"]
+        relation_prompt2 = ""
+        # position_attitude = ""
+        relation_prompt2_list = ["极度厌恶,冷漠回应或直接辱骂", "关系较差,冷淡回复,保持距离", "关系一般,保持理性", \
+                                 "关系较好,友善回复,积极互动", "关系很好,积极回复,关心对方", "关系暧昧,热情回复,无条件支持", ]
+        relation_prompt = ""
+        who_chat_in_group = [chat_stream]
+        who_chat_in_group += get_recent_group_speaker(stream_id, (chat_stream.user_info.user_id, chat_stream.user_info.platform), limit=global_config.MAX_CONTEXT_SIZE)
+        for person in who_chat_in_group:
+            relationship_value = relationship_manager.get_relationship(person).relationship_value
+            if person.user_info.user_cardname:
+                relation_prompt += f"你对昵称为'[({person.user_info.user_id}){person.user_info.user_nickname}]{person.user_info.user_cardname}'的用户的态度为"
+                relation_prompt2 += f"你对昵称为'[({person.user_info.user_id}){person.user_info.user_nickname}]{person.user_info.user_cardname}'的用户的回复态度为"
+            else:
+                relation_prompt += f"你对昵称为'({person.user_info.user_id}){person.user_info.user_nickname}'的用户的态度为"
+                relation_prompt2 += f"你对昵称为'({person.user_info.user_id}){person.user_info.user_nickname}'的用户的回复态度为"
+            relationship_level_num = 2
+            # position_attitude_num = 1
+            if -1000 <= relationship_value < -227:
+                relationship_level_num = 0
+                # position_attitude_num = 0
+            elif -227 <= relationship_value < -73:
+                relationship_level_num = 1
+                # position_attitude_num = 0
+            elif -73 <= relationship_value < 227:
+                relationship_level_num = 2
+                # position_attitude_num = 1
+            elif 227 <= relationship_value < 587:
+                relationship_level_num = 3
+                # position_attitude_num = 2
+            elif 587 <= relationship_value < 900:
+                relationship_level_num = 4
+                # position_attitude_num = 2
+            elif 900 <= relationship_value <= 1000:  # 不是随便写的数据!
+                relationship_level_num = 5
+                # position_attitude_num = 2
+            else:
+                logger.debug("relationship_value 超出有效范围 (-1000 到 1000)")
+            relation_prompt2 += relation_prompt2_list[relationship_level_num] + ","
+            # position_attitude = position_attitude_list[position_attitude_num]
+            relation_prompt += relationship_level[relationship_level_num] + ","
 
         # 开始构建prompt
 
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 28e6b7f36..93b405f4c 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -195,6 +195,34 @@ def get_recent_group_detailed_plain_text(chat_stream_id: int, limit: int = 12, c
     return message_detailed_plain_text_list
 
 
+def get_recent_group_speaker(chat_stream_id: int, sender, limit: int = 12) -> list:
+    # 获取当前群聊记录内发言的人
+    recent_messages = list(db.messages.find(
+        {"chat_id": chat_stream_id},
+        {
+            "chat_info": 1,
+            "user_info": 1,
+        }
+    ).sort("time", -1).limit(limit))
+
+    if not recent_messages:
+        return []
+
+    who_chat_in_group = []
+
+    duplicate_removal = []
+    for msg_db_data in recent_messages:
+        user_info = UserInfo.from_dict(msg_db_data["user_info"])
+        if (user_info.user_id, user_info.platform) != sender \
+                and (user_info.user_id, user_info.platform) != (global_config.BOT_QQ, "qq") \
+                and (user_info.user_id, user_info.platform) not in duplicate_removal:
+
+            duplicate_removal.append((user_info.user_id, user_info.platform))
+            chat_info = msg_db_data.get("chat_info", {})
+            who_chat_in_group.append(ChatStream.from_dict(chat_info))
+    return who_chat_in_group
+
+
 def split_into_sentences_w_remove_punctuation(text: str) -> List[str]:
     """将文本分割成句子,但保持书名号中的内容完整
     Args:
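Reviewer note (not part of the patch): the new if/elif chain in _build_prompt maps relationship_value in [-1000, 1000] onto six attitude buckets with boundaries at -227, -73, 227, 587 and 900, falling back to the neutral bucket when the value is out of range. Below is a minimal standalone sketch of that mapping for reference; the helper name relationship_bucket and the use of bisect are illustrative assumptions, not code from this PR.

from bisect import bisect_right

# Attitude labels per bucket, copied from relationship_level in prompt_builder.py.
LEVEL_LABELS = ["厌恶", "冷漠", "一般", "友好", "喜欢", "爱慕"]

# Upper bounds of buckets 0-4; values from 900 up to 1000 land in bucket 5.
THRESHOLDS = [-227, -73, 227, 587, 900]


def relationship_bucket(relationship_value: float) -> int:
    """Map a relationship value in [-1000, 1000] to a bucket index 0-5.

    Out-of-range values fall back to the neutral bucket 2, matching the
    patch's default of relationship_level_num = 2.
    """
    if not -1000 <= relationship_value <= 1000:
        return 2
    return bisect_right(THRESHOLDS, relationship_value)


if __name__ == "__main__":
    # Spot-check the boundary values used in the patch.
    for value in (-1000, -227, -73, 0, 227, 587, 900, 1000):
        print(value, LEVEL_LABELS[relationship_bucket(value)])

Indexing relation_prompt2_list with the same bucket index would keep all thresholds in one place if they are tuned later.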