From 0e982ebcab335986bee3b2aab25829f7288a01c6 Mon Sep 17 00:00:00 2001
From: SengokuCola <1026294844@qq.com>
Date: Sun, 6 Jul 2025 23:18:12 +0800
Subject: [PATCH] =?UTF-8?q?better=EF=BC=9A=E4=BC=98=E5=8C=96=E5=85=B3?=
 =?UTF-8?q?=E7=B3=BBprompt=EF=BC=8C=E5=9B=9E=E9=80=80utils=E7=9A=84?=
 =?UTF-8?q?=E4=BF=AE=E6=94=B9?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 src/llm_models/utils_model.py           | 42 +++++++++++++------
 src/person_info/relationship_fetcher.py | 55 +++++++++++++++++++++++--
 src/person_info/relationship_manager.py |  1 +
 3 files changed, 82 insertions(+), 16 deletions(-)

diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 345e8ad1d..1077cfa09 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -124,20 +124,14 @@ class LLMRequest:
         self.model_name: str = model["name"]
         self.params = kwargs
 
-        self.enable_thinking = model.get("enable_thinking", None)
+        self.enable_thinking = model.get("enable_thinking", False)
         self.temp = model.get("temp", 0.7)
-        self.thinking_budget = model.get("thinking_budget", None)
+        self.thinking_budget = model.get("thinking_budget", 4096)
         self.stream = model.get("stream", False)
         self.pri_in = model.get("pri_in", 0)
         self.pri_out = model.get("pri_out", 0)
         self.max_tokens = model.get("max_tokens", global_config.model.model_max_output_length)
         # print(f"max_tokens: {self.max_tokens}")
-        custom_params_str = model.get("custom_params", "{}")
-        try:
-            self.custom_params = json.loads(custom_params_str)
-        except json.JSONDecodeError:
-            logger.error(f"Invalid JSON in custom_params for model '{self.model_name}': {custom_params_str}")
-            self.custom_params = {}
 
         # 获取数据库实例
         self._init_database()
@@ -255,6 +249,28 @@ class LLMRequest:
         elif payload is None:
             payload = await self._build_payload(prompt)
 
+        if stream_mode:
+            payload["stream"] = stream_mode
+
+        if self.temp != 0.7:
+            payload["temperature"] = self.temp
+
+        # 添加enable_thinking参数(如果不是默认值False)
+        if not self.enable_thinking:
+            payload["enable_thinking"] = False
+
+        if self.thinking_budget != 4096:
+            payload["thinking_budget"] = self.thinking_budget
+
+        if self.max_tokens:
+            payload["max_tokens"] = self.max_tokens
+
+        # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
+        # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
+        if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
+            payload["max_completion_tokens"] = payload.pop("max_tokens")
+
         return {
             "policy": policy,
             "payload": payload,
@@ -654,16 +670,18 @@ class LLMRequest:
         if self.temp != 0.7:
             payload["temperature"] = self.temp
 
-        # 仅当配置文件中存在参数时,添加对应参数
-        if self.enable_thinking is not None:
-            payload["enable_thinking"] = self.enable_thinking
+        # 添加enable_thinking参数(如果不是默认值False)
+        if not self.enable_thinking:
+            payload["enable_thinking"] = False
 
-        if self.thinking_budget is not None:
+        if self.thinking_budget != 4096:
             payload["thinking_budget"] = self.thinking_budget
 
         if self.max_tokens:
             payload["max_tokens"] = self.max_tokens
 
+        # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
+        #     payload["max_tokens"] = global_config.model.model_max_output_length
         # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
         if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
             payload["max_completion_tokens"] = payload.pop("max_tokens")
diff --git a/src/person_info/relationship_fetcher.py b/src/person_info/relationship_fetcher.py
index f1c62851a..ea220e46a 100644
--- a/src/person_info/relationship_fetcher.py
+++ b/src/person_info/relationship_fetcher.py
@@ -9,7 +9,7 @@ from typing import List, Dict
 from json_repair import repair_json
 from src.chat.message_receive.chat_stream import get_chat_manager
 import json
-
+import random
 
 logger = get_logger("relationship_fetcher")
 
@@ -100,23 +100,70 @@ class RelationshipFetcher:
         person_info_manager = get_person_info_manager()
         person_name = await person_info_manager.get_value(person_id, "person_name")
         short_impression = await person_info_manager.get_value(person_id, "short_impression")
+
+        nickname_str = await person_info_manager.get_value(person_id, "nickname")
+        platform = await person_info_manager.get_value(person_id, "platform")
+
+        if person_name == nickname_str and not short_impression:
+            return ""
+
+        current_points = await person_info_manager.get_value(person_id, "points") or []
+
+        if isinstance(current_points, str):
+            try:
+                current_points = json.loads(current_points)
+            except json.JSONDecodeError:
+                logger.error(f"解析points JSON失败: {current_points}")
+                current_points = []
+        elif not isinstance(current_points, list):
+            current_points = []
+
+        # 按时间排序forgotten_points
+        current_points.sort(key=lambda x: x[2])
+        # 按权重加权随机抽取3个points,point[1]的值在1-10之间,权重越高被抽到概率越大
+        if len(current_points) > 3:
+            # point[1] 取值范围1-10,直接作为权重
+            weights = [max(1, min(10, int(point[1]))) for point in current_points]
+            points = random.choices(current_points, weights=weights, k=3)
+        else:
+            points = current_points
+
+        # 构建points文本
+        points_text = "\n".join([f"{point[2]}:{point[0]}" for point in points])
 
         info_type = await self._build_fetch_query(person_id, target_message, chat_history)
         if info_type:
             await self._extract_single_info(person_id, info_type, person_name)
 
         relation_info = self._organize_known_info()
+
+        # fix(review): keep the fetched nickname before formatting — clearing
+        raw_nickname = nickname_str
+        nickname_str = f"(ta在{platform}上的昵称是{raw_nickname})" if person_name != raw_nickname else ""
+
         if short_impression and relation_info:
-            relation_info = f"你对{person_name}的印象是:{short_impression}。具体来说:{relation_info}"
+            if points_text:
+                relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}。你还记得ta最近做的事:{points_text}"
+            else:
+                relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。具体来说:{relation_info}"
         elif short_impression:
-            relation_info = f"你对{person_name}的印象是:{short_impression}"
+            if points_text:
+                relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}。你还记得ta最近做的事:{points_text}"
+            else:
+                relation_info = f"你对{person_name}的印象是{nickname_str}:{short_impression}"
         elif relation_info:
-            relation_info = f"你对{person_name}的了解:{relation_info}"
+            if points_text:
+                relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}。你还记得ta最近做的事:{points_text}"
+            else:
+                relation_info = f"你对{person_name}的了解{nickname_str}:{relation_info}"
+        elif points_text:
+            relation_info = f"你记得{person_name}{nickname_str}最近做的事:{points_text}"
         else:
             relation_info = ""
 
         return relation_info
 
+
     async def _build_fetch_query(self, person_id, target_message, chat_history):
         nickname_str = ",".join(global_config.bot.alias_names)
         name_block = f"你的名字是{global_config.bot.nickname},你的昵称有{nickname_str},有人也会用这些昵称称呼你。"
diff --git a/src/person_info/relationship_manager.py b/src/person_info/relationship_manager.py
index 6a25f8716..2d37bcda8 100644
--- a/src/person_info/relationship_manager.py
+++ b/src/person_info/relationship_manager.py
@@ -126,6 +126,7 @@ class RelationshipManager:
         short_impression = await person_info_manager.get_value(person_id, "short_impression")
         current_points = await person_info_manager.get_value(person_id, "points") or []
+        logger.debug(f"current_points: {current_points}")
 
         if isinstance(current_points, str):
             try:
                 current_points = json.loads(current_points)