remove: remove the willing system, remove reply2, remove energy values, change reply_to to message
@@ -121,40 +121,11 @@ class DefaultReplyer:
     def __init__(
         self,
         chat_stream: ChatStream,
         model_set_with_weight: Optional[List[Tuple[TaskConfig, float]]] = None,
-        request_type: str = "focus.replyer",
+        request_type: str = "replyer",
     ):
         self.request_type = request_type
 
-        if model_set_with_weight:
-            # self.express_model_configs = model_configs
-            self.model_set: List[Tuple[TaskConfig, float]] = model_set_with_weight
-        else:
-            # When no config is given, use the default configs with default weights
-
-            # model_config_1 = global_config.model.replyer_1.copy()
-            # model_config_2 = global_config.model.replyer_2.copy()
-            prob_first = global_config.chat.replyer_random_probability
-
-            # model_config_1["weight"] = prob_first
-            # model_config_2["weight"] = 1.0 - prob_first
-
-            # self.express_model_configs = [model_config_1, model_config_2]
-            self.model_set = [
-                (model_config.model_task_config.replyer_1, prob_first),
-                (model_config.model_task_config.replyer_2, 1.0 - prob_first),
-            ]
-
-        # if not self.express_model_configs:
-        #     logger.warning("No valid model config found; reply generation may fail.")
-        #     # Provide a final fallback to avoid calling random.choice on an empty list
-        #     fallback_config = global_config.model.replyer_1.copy()
-        #     fallback_config.setdefault("weight", 1.0)
-        #     self.express_model_configs = [fallback_config]
-
+        self.express_model = LLMRequest(model_set=model_config.model_task_config.replyer, request_type=request_type)
         self.chat_stream = chat_stream
         self.is_group_chat, self.chat_target_info = get_chat_type_and_target_info(self.chat_stream.stream_id)
 
         self.heart_fc_sender = HeartFCSender()
         self.memory_activator = MemoryActivator()
         self.instant_memory = InstantMemory(chat_id=self.chat_stream.stream_id)
@@ -163,14 +134,6 @@ class DefaultReplyer:
 
         self.tool_executor = ToolExecutor(chat_id=self.chat_stream.stream_id, enable_cache=True, cache_ttl=3)
 
-    def _select_weighted_models_config(self) -> Tuple[TaskConfig, float]:
-        """Pick a model config via weighted random selection."""
-        configs = self.model_set
-        # Extract the weights; default to 1.0 if a config lacks a 'weight' key
-        weights = [weight for _, weight in configs]
-
-        return random.choices(population=configs, weights=weights, k=1)[0]
-
     async def generate_reply_with_context(
         self,
         reply_to: str = "",
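
Note: the `_select_weighted_models_config` helper removed above was a standard weighted random draw over (config, weight) pairs. A minimal standalone sketch of that pattern, with a plain string standing in for the project's TaskConfig:

    import random
    from typing import List, Tuple

    # Plain strings stand in for TaskConfig objects; illustration only.
    ModelSet = List[Tuple[str, float]]

    def select_weighted(configs: ModelSet) -> Tuple[str, float]:
        # random.choices draws k items with replacement, biased by the weights.
        weights = [weight for _, weight in configs]
        return random.choices(population=configs, weights=weights, k=1)[0]

    # Example: replyer_1 is picked ~70% of the time, replyer_2 ~30%.
    print(select_weighted([("replyer_1", 0.7), ("replyer_2", 0.3)]))
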
@@ -179,8 +142,8 @@ class DefaultReplyer:
         enable_tool: bool = True,
         from_plugin: bool = True,
         stream_id: Optional[str] = None,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> Tuple[bool, Optional[Dict[str, Any]], Optional[str]]:
         # sourcery skip: merge-nested-ifs
         """
         Replier: the core logic responsible for generating reply text.
 
@@ -205,6 +168,7 @@ class DefaultReplyer:
             extra_info=extra_info,
             available_actions=available_actions,
             enable_tool=enable_tool,
+            reply_message=reply_message,
         )
 
         if not prompt:
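
Note: with the new parameter, callers pass the original message as a structured dict instead of a packed reply_to string. A hedged usage sketch; `replyer` and the surrounding async context are assumed, and the dict keys are the ones this diff reads elsewhere:

    # Assumed: `replyer` is an already-constructed DefaultReplyer.
    async def demo(replyer) -> None:
        reply_message = {
            "user_id": "12345",
            "chat_info_platform": "qq",
            "processed_plain_text": "what's the weather like?",
        }
        success, reply_set, prompt = await replyer.generate_reply_with_context(
            reply_message=reply_message,  # new style: structured message dict
            # reply_to="Alice:what's the weather like?",  # legacy style still accepted
        )
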
@@ -302,16 +266,11 @@ class DefaultReplyer:
             traceback.print_exc()
             return False, None, prompt if return_prompt else None
 
-    async def build_relation_info(self, reply_to: str = ""):
+    async def build_relation_info(self, sender: str, target: str):
         if not global_config.relationship.enable_relationship:
             return ""
 
         relationship_fetcher = relationship_fetcher_manager.get_fetcher(self.chat_stream.stream_id)
-        if not reply_to:
-            return ""
-        sender, text = self._parse_reply_target(reply_to)
-        if not sender or not text:
-            return ""
 
         # Get the user ID
         person_info_manager = get_person_info_manager()
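
Note: the build_* helpers now receive sender and target explicitly instead of re-parsing reply_to. Judging from its call sites, `_parse_reply_target` splits a packed string into a sender and a message text; the "sender:text" separator below is an assumption, not confirmed by this diff:

    from typing import Tuple

    def parse_reply_target(reply_to: str) -> Tuple[str, str]:
        # Assumed legacy format: "<sender>:<text>"; returns ("", "") when malformed.
        if ":" in reply_to:
            sender, text = reply_to.split(":", 1)
            return sender.strip(), text.strip()
        return "", ""

    # -> ("Alice", "hello there") under the assumed format.
    print(parse_reply_target("Alice:hello there"))
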
@@ -418,7 +377,7 @@ class DefaultReplyer:
 
         return memory_str
 
-    async def build_tool_info(self, chat_history: str, reply_to: str = "", enable_tool: bool = True) -> str:
+    async def build_tool_info(self, chat_history: str, sender: str, target: str, enable_tool: bool = True) -> str:
         """Build the tool info block
 
         Args:
@@ -433,18 +392,11 @@ class DefaultReplyer:
         if not enable_tool:
             return ""
 
-        if not reply_to:
-            return ""
-
-        sender, text = self._parse_reply_target(reply_to)
-
-        if not text:
-            return ""
-
         try:
             # Use the tool executor to gather information
             tool_results, _, _ = await self.tool_executor.execute_from_chat_message(
-                sender=sender, target_message=text, chat_history=chat_history, return_details=False
+                sender=sender, target_message=target, chat_history=chat_history, return_details=False
             )
 
             if tool_results:
@@ -672,7 +624,8 @@ class DefaultReplyer:
         extra_info: str = "",
         available_actions: Optional[Dict[str, ActionInfo]] = None,
         enable_tool: bool = True,
-    ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
+        reply_message: Optional[Dict[str, Any]] = None,
+    ) -> str:
         """
         Build the replyer context
 
@@ -682,7 +635,7 @@ class DefaultReplyer:
             available_actions: available actions
             enable_timeout: whether to enable timeout handling
             enable_tool: whether to enable tool calls
-
+            reply_message: the original message being replied to
         Returns:
             str: the assembled context
         """
@@ -698,8 +651,21 @@ class DefaultReplyer:
             mood_prompt = chat_mood.mood_state
         else:
             mood_prompt = ""
 
-        sender, target = self._parse_reply_target(reply_to)
+        if reply_to:
+            # Backward compatibility with the old reply_to
+            sender, target = self._parse_reply_target(reply_to)
+        else:
+            # Get the platform; fall back to chat_stream if missing, and to a default if still None
+            person_info_manager = get_person_info_manager()  # must be bound before first use below
+            platform = reply_message.get("chat_info_platform")
+            person_id = person_info_manager.get_person_id(
+                platform,  # type: ignore
+                reply_message.get("user_id"),  # type: ignore
+            )
+            person_name = await person_info_manager.get_value(person_id, "person_name")
+            sender = person_name
+            target = reply_message.get("processed_plain_text")
 
         person_info_manager = get_person_info_manager()
         person_id = person_info_manager.get_person_id_by_person_name(sender)
         user_id = person_info_manager.get_value_sync(person_id, "user_id")
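
Note: taken together, the branch above resolves sender and target from either source. A simplified sketch of that resolution in isolation; `lookup_person_name` is a hypothetical stand-in for the person_info_manager calls, and the async plumbing is omitted:

    from typing import Any, Dict, Optional, Tuple

    def resolve_sender_and_target(
        reply_to: str,
        reply_message: Optional[Dict[str, Any]],
        lookup_person_name,  # hypothetical stand-in for person_info_manager
    ) -> Tuple[Optional[str], Optional[str]]:
        if reply_to:
            # Legacy path: sender and text packed into a single string.
            sender, _, target = reply_to.partition(":")
            return sender or None, target or None
        # New path: read the structured message dict.
        sender = lookup_person_name(
            reply_message.get("chat_info_platform"), reply_message.get("user_id")
        )
        return sender, reply_message.get("processed_plain_text")

    msg = {"chat_info_platform": "qq", "user_id": "42", "processed_plain_text": "hi"}
    print(resolve_sender_and_target("", msg, lambda platform, uid: f"user-{uid}"))
    # -> ('user-42', 'hi')
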
@@ -744,12 +710,12 @@ class DefaultReplyer:
             self._time_and_run_task(
                 self.build_expression_habits(chat_talking_prompt_short, target), "expression_habits"
             ),
-            self._time_and_run_task(self.build_relation_info(reply_to), "relation_info"),
+            self._time_and_run_task(self.build_relation_info(sender, target), "relation_info"),
             self._time_and_run_task(self.build_memory_block(chat_talking_prompt_short, target), "memory_block"),
             self._time_and_run_task(
-                self.build_tool_info(chat_talking_prompt_short, reply_to, enable_tool=enable_tool), "tool_info"
+                self.build_tool_info(chat_talking_prompt_short, sender, target, enable_tool=enable_tool), "tool_info"
             ),
-            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, reply_to), "prompt_info"),
+            self._time_and_run_task(self.get_prompt_info(chat_talking_prompt_short, sender, target), "prompt_info"),
         )
 
         # Chinese-English mapping of task names
@@ -899,12 +865,17 @@ class DefaultReplyer:
         raw_reply: str,
         reason: str,
         reply_to: str,
+        reply_message: Optional[Dict[str, Any]] = None,
     ) -> str:  # sourcery skip: merge-else-if-into-elif, remove-redundant-if
         chat_stream = self.chat_stream
         chat_id = chat_stream.stream_id
         is_group_chat = bool(chat_stream.group_info)
 
-        sender, target = self._parse_reply_target(reply_to)
+        if reply_message:
+            sender = reply_message.get("sender")
+            target = reply_message.get("target")
+        else:
+            sender, target = self._parse_reply_target(reply_to)
 
         # Also fetch the mood state
         if global_config.mood.enable_mood:
@@ -930,7 +901,7 @@ class DefaultReplyer:
         # Run the 2 build tasks in parallel
         expression_habits_block, relation_info = await asyncio.gather(
             self.build_expression_habits(chat_talking_prompt_half, target),
-            self.build_relation_info(reply_to),
+            self.build_relation_info(sender, target),
         )
 
         keywords_reaction_prompt = await self.build_keywords_reaction_prompt(target)
@@ -1035,34 +1006,25 @@ class DefaultReplyer:
 
     async def llm_generate_content(self, prompt: str):
         with Timer("LLM generation", {}):  # internal timer, optional to keep
-            # Pick a model config via weighted random selection
-            selected_model_config, weight = self._select_weighted_models_config()
-            logger.info(f"Generating reply with model set: {selected_model_config} (selection probability: {weight})")
-
-            express_model = LLMRequest(model_set=selected_model_config, request_type=self.request_type)
+            # Use the already-initialized model instance directly
+            logger.info(f"Generating reply with model set: {self.express_model.model_for_task}")
 
             if global_config.debug.show_prompt:
                 logger.info(f"\n{prompt}\n")
             else:
                 logger.debug(f"\n{prompt}\n")
 
-            content, (reasoning_content, model_name, tool_calls) = await express_model.generate_response_async(prompt)
+            content, (reasoning_content, model_name, tool_calls) = await self.express_model.generate_response_async(prompt)
 
             logger.debug(f"replyer generated content: {content}")
             return content, reasoning_content, model_name, tool_calls
 
-    async def get_prompt_info(self, message: str, reply_to: str):
+    async def get_prompt_info(self, message: str, sender: str, target: str):
         related_info = ""
         start_time = time.time()
         from src.plugins.built_in.knowledge.lpmm_get_knowledge import SearchKnowledgeFromLPMMTool
 
-        if not reply_to:
-            logger.debug("No reply target; skipping knowledge base lookup")
-            return ""
-        sender, content = self._parse_reply_target(reply_to)
-        if not content:
-            logger.debug("Reply target content is empty; skipping knowledge base lookup")
-            return ""
 
         logger.debug(f"Fetching knowledge base content, source message: {message[:30]}..., message length: {len(message)}")
         # Fetch knowledge from the LPMM knowledge base
         try:
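
Note: the generation path now reuses the single LLMRequest constructed in __init__ instead of building one per call, removing the weighted draw entirely. A sketch of the build-once/reuse pattern; `Model` is a hypothetical stand-in for LLMRequest:

    class Model:
        # Hypothetical stand-in for LLMRequest; pretend construction is expensive.
        def __init__(self, name: str) -> None:
            self.name = name

        def generate(self, prompt: str) -> str:
            return f"[{self.name}] reply to: {prompt}"

    class Replyer:
        def __init__(self) -> None:
            # Build once, reuse on every call (the pattern this commit adopts).
            self.express_model = Model("replyer")

        def generate(self, prompt: str) -> str:
            # Before this commit: a new Model was constructed here per call.
            return self.express_model.generate(prompt)

    print(Replyer().generate("hello"))
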
@@ -1080,7 +1042,7 @@ class DefaultReplyer:
                 time_now=time_now,
                 chat_history=message,
                 sender=sender,
-                target_message=content,
+                target_message=target,
             )
             _, _, _, _, tool_calls = await llm_api.generate_with_model_with_tools(
                 prompt,