fix: unify the LLM request function and update model names
@@ -76,8 +76,7 @@ class DefaultExpressor:
         self.log_prefix = "expressor"
         # TODO: API-Adapter change marker
         self.express_model = LLMRequest(
-            model=global_config.model.focus_expressor,
-            # temperature=global_config.model.focus_expressor["temp"],
+            model=global_config.model.replyer_1,
             max_tokens=256,
             request_type="focus.expressor",
         )

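This constructor/call shape is the pattern the whole commit converges on: one `LLMRequest` per use case, one async entry point. A minimal sketch of the convention, assuming only the names visible in this diff (`LLMRequest`, `model`, `max_tokens`, `request_type`, `generate_response_async`); the body is a stand-in, not the project's implementation:

```python
# Hedged sketch of the unified request convention -- names from the diff,
# body invented for illustration.
class LLMRequest:
    def __init__(self, model, max_tokens: int = 256,
                 temperature: float | None = None, request_type: str = "") -> None:
        self.model = model                # a config entry such as global_config.model.replyer_1
        self.max_tokens = max_tokens
        self.temperature = temperature    # optional; several call sites comment it out
        self.request_type = request_type  # usage tag, e.g. "focus.expressor"

    async def generate_response_async(self, prompt: str, tools=None):
        content = "..."  # the real implementation calls the API adapter here
        reasoning_content, model_name = "", str(self.model)
        if tools is not None:
            # Tool-enabled requests carry a third metadata element
            # (see the ToolProcessor hunk below).
            return content, (reasoning_content, model_name, None)
        return content, (reasoning_content, model_name)
```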
@@ -65,7 +65,7 @@ class ExpressionLearner:
     def __init__(self) -> None:
         # TODO: API-Adapter change marker
         self.express_learn_model: LLMRequest = LLMRequest(
-            model=global_config.model.focus_expressor,
+            model=global_config.model.replyer_1,
             temperature=0.1,
             max_tokens=256,
             request_type="expressor.learner",

@@ -28,9 +28,7 @@ def init_prompt():

    Please analyze the chat content carefully, considering the following points:
    1. Whether the content contains a question that requires looking up information
-   2. Whether a specific action needs to be performed
-   3. Whether there is an explicit instruction to use a tool
-   4. Consider your relationship with the user and the current mood of the conversation
+   2. Whether there is an explicit instruction to use a tool

    If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed".
    """

@@ -146,23 +144,27 @@ class ToolProcessor(BaseProcessor):
         prompt = await global_prompt_manager.format_prompt(
             "tool_executor_prompt",
             memory_str=memory_str,
-            # extra_info="extra_structured_info",
             chat_observe_info=chat_observe_info,
-            # chat_target_name=chat_target_name,
             is_group_chat=is_group_chat,
-            # relation_prompt=relation_prompt,
-            # prompt_personality=prompt_personality,
-            # mood_info=mood_info,
             bot_name=individuality.name,
             time_now=time_now,
         )

-        # Call the LLM, focused on tool use
-        logger.debug(f"Starting tool call execution {prompt}")
-        response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
+        # logger.info(f"Starting tool call execution {prompt}")
+        response, other_info = await self.llm_model.generate_response_async(
+            prompt=prompt, tools=tools
+        )
+
+        if len(other_info) == 3:
+            reasoning_content, model_name, tool_calls = other_info
+        else:
+            reasoning_content, model_name = other_info
+            tool_calls = None
+
+        # print("tooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltool")
         if tool_calls:
-            logger.debug(f"Got raw tool output:\n{tool_calls}")
+            logger.info(f"Got raw tool output:\n{tool_calls}")
             # Handle tool calls and collect results, similar to the logic in SubMind
             new_structured_items = []
             used_tools = []  # record which tools were used

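The `len(other_info)` branch exists because the metadata tuple gains a third element, `tool_calls`, only on tool-enabled requests. If more call sites grow this check, a small normalizer would keep the unpacking in one place; `normalize_llm_info` below is a hypothetical helper, not code from the repo:

```python
def normalize_llm_info(other_info):
    """Hypothetical helper: always yield (reasoning_content, model_name, tool_calls)."""
    if len(other_info) == 3:
        return other_info
    reasoning_content, model_name = other_info
    return reasoning_content, model_name, None

# The call site above would then read:
#   response, other_info = await self.llm_model.generate_response_async(prompt=prompt, tools=tools)
#   reasoning_content, model_name, tool_calls = normalize_llm_info(other_info)
```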
@@ -112,14 +112,9 @@ class MemoryActivator:

         # logger.debug(f"prompt: {prompt}")

-        response = await self.summary_model.generate_response(prompt)
+        response, (reasoning_content, model_name) = await self.summary_model.generate_response_async(prompt)

-        # logger.debug(f"response: {response}")
-
-        # Take only the first element of response (the string)
-        response_str = response[0]
-        # print(f"response_str: {response_str[1]}")
-        keywords = list(get_keywords_from_json(response_str))
+        keywords = list(get_keywords_from_json(response))

         # Update the keyword cache
         if keywords:

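With the unified API returning the reply text directly, `get_keywords_from_json` now receives a plain string instead of `response[0]`. Its implementation is not part of this diff; one plausible reading, assuming the summary prompt asks the model for JSON of the form `{"keywords": [...]}`:

```python
import json
import re


def get_keywords_from_json(response: str) -> list:
    """Assumed behavior only: pull a "keywords" list out of a JSON blob in the reply."""
    match = re.search(r"\{.*\}", response, re.DOTALL)  # tolerate prose around the JSON
    if match is None:
        return []
    try:
        return json.loads(match.group(0)).get("keywords", [])
    except json.JSONDecodeError:
        return []
```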
@@ -123,12 +123,13 @@ class EmojiAction(BaseAction):
         )

         reply_text = ""
-        for reply in reply_set:
-            type = reply[0]
-            data = reply[1]
-            if type == "text":
-                reply_text += data
-            elif type == "emoji":
-                reply_text += data
+        if reply_set:
+            for reply in reply_set:
+                type = reply[0]
+                data = reply[1]
+                if type == "text":
+                    reply_text += data
+                elif type == "emoji":
+                    reply_text += data

         return success, reply_text

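The added `if reply_set:` guard is purely defensive, since iterating an empty sequence is already a no-op. Because both branches append `data` either way, an equivalent, tighter form that also avoids shadowing the built-in `type` would be:

```python
reply_set = [("text", "nice "), ("emoji", "😀")]  # example data in the (type, data) shape above
reply_text = "".join(data for kind, data in reply_set if kind in ("text", "emoji"))
print(reply_text)  # nice 😀
```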
@@ -87,8 +87,7 @@ class DefaultReplyer:
         self.log_prefix = "replyer"
         # TODO: API-Adapter change marker
         self.express_model = LLMRequest(
-            model=global_config.model.focus_expressor,
-            # temperature=global_config.model.focus_expressor["temp"],
+            model=global_config.model.replyer_1,
             max_tokens=256,
             request_type="focus.expressor",
         )

@@ -346,10 +346,10 @@ class Hippocampus:
         # Use the LLM to extract keywords
         topic_num = min(5, max(1, int(len(text) * 0.1)))  # dynamically scale the number of keywords with text length
         # logger.info(f"Number of keywords to extract: {topic_num}")
-        topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
+        topics_response, (reasoning_content, model_name) = await self.model_summary.generate_response_async(self.find_topic_llm(text, topic_num))

         # Extract keywords
-        keywords = re.findall(r"<([^>]+)>", topics_response[0])
+        keywords = re.findall(r"<([^>]+)>", topics_response)
         if not keywords:
             keywords = []
         else:

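Dropping the `[0]` index works because `generate_response_async` hands back the text itself; the extraction contract is unchanged, i.e. the summary model is prompted to wrap each topic in angle brackets and `re.findall` pulls them out. A self-contained check of that contract:

```python
import re

text = "今天爬了山,还聊了相机和无人机"            # 15 characters
topic_num = min(5, max(1, int(len(text) * 0.1)))  # -> 1, clamped to the range 1..5
topics_response = "<爬山> <摄影> <无人机>"          # the shape the LLM is asked to produce
keywords = re.findall(r"<([^>]+)>", topics_response)
print(topic_num, keywords)                        # 1 ['爬山', '摄影', '无人机']
```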
@@ -701,10 +701,10 @@ class Hippocampus:
         # Use the LLM to extract keywords
         topic_num = min(5, max(1, int(len(text) * 0.1)))  # dynamically scale the number of keywords with text length
         # logger.info(f"Number of keywords to extract: {topic_num}")
-        topics_response = await self.model_summary.generate_response(self.find_topic_llm(text, topic_num))
+        topics_response, (reasoning_content, model_name) = await self.model_summary.generate_response_async(self.find_topic_llm(text, topic_num))

         # Extract keywords
-        keywords = re.findall(r"<([^>]+)>", topics_response[0])
+        keywords = re.findall(r"<([^>]+)>", topics_response)
         if not keywords:
             keywords = []
         else:

@@ -1248,12 +1248,12 @@ class ParahippocampalGyrus:

         # 2. Use the LLM to extract key topics
         topic_num = self.hippocampus.calculate_topic_num(input_text, compress_rate)
-        topics_response = await self.hippocampus.model_summary.generate_response(
+        topics_response, (reasoning_content, model_name) = await self.hippocampus.model_summary.generate_response_async(
             self.hippocampus.find_topic_llm(input_text, topic_num)
         )

         # Extract the content inside <>
-        topics = re.findall(r"<([^>]+)>", topics_response[0])
+        topics = re.findall(r"<([^>]+)>", topics_response)

         if not topics:
             topics = ["none"]

@@ -18,14 +18,14 @@ class NormalChatGenerator:
     def __init__(self):
         # TODO: API-Adapter change marker
         self.model_reasoning = LLMRequest(
-            model=global_config.model.normal_chat_1,
+            model=global_config.model.replyer_1,
            # temperature=0.7,
             max_tokens=3000,
             request_type="normal.chat_1",
         )
         self.model_normal = LLMRequest(
-            model=global_config.model.normal_chat_2,
-            # temperature=global_config.model.normal_chat_2["temp"],
+            model=global_config.model.replyer_2,
+            # temperature=global_config.model.replyer_2["temp"],
             max_tokens=256,
             request_type="normal.chat_2",
         )

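Every constructor now points at the two shared entries `replyer_1` and `replyer_2` instead of per-feature model names. The attribute access `global_config.model.replyer_1` plus the commented-out `["temp"]` subscript suggest an object whose fields are mappings (e.g. TOML tables); a hedged sketch of that assumed shape, with invented values:

```python
from types import SimpleNamespace

# Assumed config shape only -- the field names come from this diff, the values do not.
model = SimpleNamespace(
    replyer_1={"name": "some-larger-model", "temp": 0.7},   # long replies (max_tokens=3000 above)
    replyer_2={"name": "some-smaller-model", "temp": 0.7},  # short replies (max_tokens=256 above)
)
print(model.replyer_2["temp"])  # 0.7
```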
@@ -103,7 +103,7 @@ class NormalChatGenerator:
             logger.debug(f"Prompt build time: {t_build_prompt.human_readable}")

             try:
-                content, reasoning_content, self.current_model_name = await model.generate_response(prompt)
+                content, (reasoning_content, model_name) = await model.generate_response_async(prompt)

                 logger.debug(f"prompt:{prompt}\nGenerated reply:{content}")

@@ -147,7 +147,7 @@ class NormalChatGenerator:
         """

         # Call the model to generate the result
-        result, _, _ = await self.model_sum.generate_response(prompt)
+        result, (reasoning_content, model_name) = await self.model_sum.generate_response_async(prompt)
         result = result.strip()

         # Parse the model's output

@@ -148,10 +148,12 @@ class NormalChatPlanner:

         # Use the LLM to generate the action decision
         try:
-            content, reasoning_content, model_name = await self.planner_llm.generate_response(prompt)
+            content, (reasoning_content, model_name) = await self.planner_llm.generate_response_async(prompt)

             logger.info(f"{self.log_prefix}Planner raw prompt: {prompt}")
             logger.info(f"{self.log_prefix}Planner raw response: {content}")
+            logger.info(f"{self.log_prefix}Planner reasoning: {reasoning_content}")
+            logger.info(f"{self.log_prefix}Planner model: {model_name}")

             # Parse the JSON response
             try:
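Summed up, the mechanical migration repeated across these hunks is: a flat 3-tuple from `generate_response` becomes the content plus a nested `(reasoning_content, model_name)` pair from `generate_response_async`. A sketch of a migrated call site, with the kind of tolerant JSON parsing the planner needs (the fallback dict is illustrative, not the repo's parser):

```python
import json


async def plan(planner_llm, prompt: str) -> dict:
    # Before: content, reasoning_content, model_name = await planner_llm.generate_response(prompt)
    content, (reasoning_content, model_name) = await planner_llm.generate_response_async(prompt)
    try:
        return json.loads(content)  # the planner expects a JSON action decision
    except json.JSONDecodeError:
        return {"action": "no_action", "raw": content}  # illustrative fallback shape
```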