fix: repair incorrect model configuration wiring and broken no_action handling
@@ -69,11 +69,13 @@ def init_prompt():
 class MemoryActivator:
     def __init__(self):
         # TODO: API-Adapter修改标记
-        self.summary_model = LLMRequest(
-            model=global_config.model.memory_summary,
-            temperature=0.7,
+        self.key_words_model = LLMRequest(
+            model=global_config.model.utils_small,
+            temperature=0.5,
             request_type="memory_activator",
         )

         self.running_memory = []
         self.cached_keywords = set()  # 用于缓存历史关键词
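This hunk renames MemoryActivator's summary_model to key_words_model and repoints it from the memory_summary model entry to utils_small at a lower temperature. Below is a minimal sketch, assuming LLMRequest and global_config accept the keyword arguments shown in the diff, of failing fast when the configured model entry is absent (the kind of misapplied configuration this commit fixes); the function name and the getattr guard are illustrative, not from the commit:

    def build_keyword_model(global_config, LLMRequest):
        # Fail fast if the model entry referenced by the config is missing;
        # "utils_small" mirrors the value used in the hunk above.
        model_cfg = getattr(global_config.model, "utils_small", None)
        if model_cfg is None:
            raise ValueError("global_config.model.utils_small is not configured")
        return LLMRequest(
            model=model_cfg,
            temperature=0.5,
            request_type="memory_activator",
        )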
@@ -97,7 +99,7 @@ class MemoryActivator:

         # logger.debug(f"prompt: {prompt}")

-        response, (reasoning_content, model_name) = await self.summary_model.generate_response_async(prompt)
+        response, (reasoning_content, model_name) = await self.key_words_model.generate_response_async(prompt)

         keywords = list(get_keywords_from_json(response))
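The call site now awaits key_words_model and hands the raw response to get_keywords_from_json, whose implementation is not part of this diff. The sketch below is one plausible shape for such a helper, assuming the model is prompted to answer with a {"keywords": [...]} JSON object, possibly wrapped in surrounding prose:

    import json
    import re

    def get_keywords_from_json(response: str) -> set:
        # Pull the outermost JSON object out of a possibly chatty reply.
        match = re.search(r"\{.*\}", response, re.DOTALL)
        if not match:
            return set()
        try:
            data = json.loads(match.group(0))
        except json.JSONDecodeError:
            return set()
        # Assumed response shape: {"keywords": ["a", "b", ...]}
        return set(data.get("keywords", []))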
@@ -154,8 +154,7 @@ class ActionPlanner:
                     action_data[key] = value

         if action == "no_action":
-            action = "no_reply"
-            reasoning = "决定不使用额外动作"
+            reasoning = "normal决定不使用额外动作"
         elif action not in current_available_actions:
             logger.warning(
                 f"{self.log_prefix}LLM 返回了当前不可用或无效的动作: '{action}' (可用: {list(current_available_actions.keys())}),将强制使用 'no_reply'"
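Before this hunk, a "no_action" decision was silently rewritten to "no_reply", so the no_action branch could never actually run (the execution error named in the commit title); after the fix, only the reasoning string is set and the action keeps its identity. A self-contained sketch of the corrected flow follows; the function wrapper and the fallback reasoning text are assumptions, while the branch conditions mirror the diff:

    import logging

    logger = logging.getLogger(__name__)

    def normalize_action(action, current_available_actions, log_prefix=""):
        reasoning = None
        if action == "no_action":
            # Keep "no_action" unchanged so its own handler runs.
            reasoning = "normal决定不使用额外动作"
        elif action not in current_available_actions:
            logger.warning(
                "%sLLM returned an unavailable action %r (available: %s); forcing 'no_reply'",
                log_prefix, action, list(current_available_actions.keys()),
            )
            action = "no_reply"
            reasoning = "fallback: requested action unavailable"
        return action, reasoning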
@@ -70,14 +70,14 @@ class RelationshipFetcher:

         # LLM模型配置
         self.llm_model = LLMRequest(
-            model=global_config.model.relation,
-            request_type="relation",
+            model=global_config.model.utils_small,
+            request_type="relation.fetcher",
         )

         # 小模型用于即时信息提取
         self.instant_llm_model = LLMRequest(
             model=global_config.model.utils_small,
-            request_type="relation.instant",
+            request_type="relation.fetch",
         )

         name = get_chat_manager().get_stream_name(self.chat_id)
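The last hunk moves both RelationshipFetcher requests onto the utils_small model entry and renames their request_type tags into the dotted relation.* scheme. A runnable sketch with a stand-in dataclass (the real LLMRequest class body is not shown in the diff) that checks the two call paths remain distinguishable by tag:

    from dataclasses import dataclass

    @dataclass
    class LLMRequest:  # stand-in for the project's class, for illustration only
        model: object
        request_type: str

    def build_relation_models(model_cfg):
        llm_model = LLMRequest(model=model_cfg, request_type="relation.fetcher")
        instant_llm_model = LLMRequest(model=model_cfg, request_type="relation.fetch")
        # Both now share one model entry, but per-request-type accounting
        # can still tell the two call paths apart.
        assert llm_model.request_type != instant_llm_model.request_type
        return llm_model, instant_llm_model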