fix: unify the LLM request function and change model names

SengokuCola
2025-06-04 23:27:24 +08:00
parent 077b67fa60
commit 4ebfca096c
17 changed files with 116 additions and 187 deletions

View File

@@ -76,8 +76,7 @@ class DefaultExpressor:
         self.log_prefix = "expressor"
         # TODO: API-Adapter修改标记
         self.express_model = LLMRequest(
-            model=global_config.model.focus_expressor,
-            # temperature=global_config.model.focus_expressor["temp"],
+            model=global_config.model.replyer_1,
             max_tokens=256,
             request_type="focus.expressor",
         )
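
Both DefaultExpressor and DefaultReplyer (last hunk below) now read the same replyer_1 entry. A minimal sketch of the shared construction, assuming only the LLMRequest signature and global_config attributes visible in this diff; the helper name is hypothetical:

# Sketch only: LLMRequest and global_config.model.replyer_1 are taken from the diff above;
# build_reply_model itself is a hypothetical convenience wrapper, not part of the commit.
def build_reply_model(request_type: str) -> LLMRequest:
    # Both the expressor and the replyer point at the replyer_1 model config after this commit.
    return LLMRequest(
        model=global_config.model.replyer_1,
        max_tokens=256,
        request_type=request_type,
    )

For example, build_reply_model("focus.expressor") would reproduce the constructor call shown above.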

View File

@@ -65,7 +65,7 @@ class ExpressionLearner:
     def __init__(self) -> None:
         # TODO: API-Adapter修改标记
         self.express_learn_model: LLMRequest = LLMRequest(
-            model=global_config.model.focus_expressor,
+            model=global_config.model.replyer_1,
             temperature=0.1,
             max_tokens=256,
             request_type="expressor.learner",

View File

@@ -28,9 +28,7 @@ def init_prompt():
 请仔细分析聊天内容,考虑以下几点:
 1. 内容中是否包含需要查询信息的问题
-2. 是否需要执行特定操作
-3. 是否有明确的工具使用指令
-4. 考虑用户与你的关系以及当前的对话氛围
+2. 是否有明确的工具使用指令
 If you need to use a tool, please directly call the corresponding tool function. If you do not need to use any tool, simply output "No tool needed".
 """
@@ -146,23 +144,27 @@ class ToolProcessor(BaseProcessor):
         prompt = await global_prompt_manager.format_prompt(
             "tool_executor_prompt",
             memory_str=memory_str,
             # extra_info="extra_structured_info",
             chat_observe_info=chat_observe_info,
             # chat_target_name=chat_target_name,
             is_group_chat=is_group_chat,
             # relation_prompt=relation_prompt,
             # prompt_personality=prompt_personality,
             # mood_info=mood_info,
             bot_name=individuality.name,
             time_now=time_now,
         )
-        # 调用LLM专注于工具使用
-        logger.debug(f"开始执行工具调用{prompt}")
-        response, _, tool_calls = await self.llm_model.generate_response_tool_async(prompt=prompt, tools=tools)
-        # logger.info(f"开始执行工具调用{prompt}")
+        response, other_info = await self.llm_model.generate_response_async(
+            prompt=prompt, tools=tools
+        )
+        if len(other_info) == 3:
+            reasoning_content, model_name, tool_calls = other_info
+        else:
+            reasoning_content, model_name = other_info
+            tool_calls = None
         # print("tooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltooltool")
         if tool_calls:
-            logger.debug(f"获取到工具原始输出:\n{tool_calls}")
+            logger.info(f"获取到工具原始输出:\n{tool_calls}")
             # 处理工具调用和结果收集类似于SubMind中的逻辑
             new_structured_items = []
             used_tools = []  # 记录使用了哪些工具
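
The unified call returns a (content, other_info) pair, where other_info carries two or three elements depending on whether tool calls were produced. A minimal sketch of that unpacking as a standalone helper, assuming only the tuple shapes visible in this hunk; the function name is hypothetical:

# Sketch only: the tuple shapes are inferred from this diff, not from the library's documentation.
def unpack_other_info(other_info):
    """Normalize the metadata tuple returned alongside the response content."""
    if len(other_info) == 3:
        reasoning_content, model_name, tool_calls = other_info
    else:
        reasoning_content, model_name = other_info
        tool_calls = None  # no tool calls were produced
    return reasoning_content, model_name, tool_calls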

View File

@@ -112,14 +112,9 @@ class MemoryActivator:
         # logger.debug(f"prompt: {prompt}")
-        response = await self.summary_model.generate_response(prompt)
+        response, (reasoning_content, model_name) = await self.summary_model.generate_response_async(prompt)
         # logger.debug(f"response: {response}")
-        # 只取response的第一个元素字符串
-        response_str = response[0]
-        # print(f"response_str: {response_str[1]}")
-        keywords = list(get_keywords_from_json(response_str))
+        keywords = list(get_keywords_from_json(response))
         # 更新关键词缓存
         if keywords:
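
With the unified function, the first element of the returned pair is already the reply text, so the old response[0] indexing goes away. A hypothetical usage sketch under the return shape shown above:

# Hypothetical usage, assuming the (content, (reasoning_content, model_name)) shape in this hunk.
async def activate_memory_keywords(summary_model, prompt: str) -> list:
    content, (reasoning_content, model_name) = await summary_model.generate_response_async(prompt)
    # content is the reply string itself; no [0] indexing is needed anymore.
    return list(get_keywords_from_json(content))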

View File

@@ -123,12 +123,13 @@ class EmojiAction(BaseAction):
         )
         reply_text = ""
-        for reply in reply_set:
-            type = reply[0]
-            data = reply[1]
-            if type == "text":
-                reply_text += data
-            elif type == "emoji":
-                reply_text += data
+        if reply_set:
+            for reply in reply_set:
+                type = reply[0]
+                data = reply[1]
+                if type == "text":
+                    reply_text += data
+                elif type == "emoji":
+                    reply_text += data
         return success, reply_text
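
The added guard only protects against reply_set being empty or None. An equivalent, slightly more compact sketch, assuming reply_set is None or an iterable of (type, data) pairs as the hunk suggests:

# Sketch only: same behavior as the new code, with a None-safe iteration and tuple unpacking.
def collect_reply_text(reply_set) -> str:
    reply_text = ""
    for reply_type, data in reply_set or []:
        if reply_type in ("text", "emoji"):
            reply_text += data
    return reply_text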

View File

@@ -87,8 +87,7 @@ class DefaultReplyer:
         self.log_prefix = "replyer"
         # TODO: API-Adapter修改标记
         self.express_model = LLMRequest(
-            model=global_config.model.focus_expressor,
-            # temperature=global_config.model.focus_expressor["temp"],
+            model=global_config.model.replyer_1,
             max_tokens=256,
             request_type="focus.expressor",
         )