fix: removed some token limits
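
The explicit max_tokens arguments are dropped from the LLMRequest constructions across the focus chat, normal chat, expression, memory, relationship and PFC modules, and PluginAction's generation helper loses its max_tokens parameter as well. Output length presumably falls back to whatever default the model configuration or API adapter applies; that default is not shown in this diff, so treat it as an assumption. A minimal sketch of the pattern, quoted from the first hunk below:

    # before: an explicit per-request cap
    self.express_model = LLMRequest(
        model=global_config.model.replyer_1,
        max_tokens=256,
        request_type="focus.expressor",
    )

    # after: no explicit max_tokens (presumably the configured default applies)
    self.express_model = LLMRequest(
        model=global_config.model.replyer_1,
        request_type="focus.expressor",
    )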

This commit is contained in:
SengokuCola
2025-06-09 00:32:30 +08:00
parent 79405d1871
commit 1e51717796
14 changed files with 7 additions and 24 deletions

View File

@@ -77,7 +77,6 @@ class DefaultExpressor:
# TODO: API-Adapter修改标记
self.express_model = LLMRequest(
model=global_config.model.replyer_1,
-max_tokens=256,
request_type="focus.expressor",
)
self.heart_fc_sender = HeartFCSender()

View File

@@ -70,7 +70,6 @@ class ExpressionLearner:
self.express_learn_model: LLMRequest = LLMRequest(
model=global_config.model.replyer_1,
temperature=0.1,
-max_tokens=256,
request_type="expressor.learner",
)

View File

@@ -31,7 +31,6 @@ class ChattingInfoProcessor(BaseProcessor):
self.model_summary = LLMRequest(
model=global_config.model.utils_small,
temperature=0.7,
-max_tokens=300,
request_type="focus.observation.chat",
)

View File

@@ -69,7 +69,6 @@ class MindProcessor(BaseProcessor):
self.llm_model = LLMRequest(
model=global_config.model.planner,
-max_tokens=800,
request_type="focus.processor.chat_mind",
)

View File

@@ -37,7 +37,7 @@ def init_prompt():
你不同程度上认识群聊里的人,你可以根据聊天记录,回忆起有关他们的信息,帮助你参与聊天
1.你需要提供用户名,以及你想要提取的信息名称类型来进行调取
2.你也可以完全不输出任何信息
-3.如果短期内已经回忆过某个人的信息,请不要重复调取,除非你忘记了
+3.阅读调取记录,如果已经回忆过某个人的信息,请不要重复调取,除非你忘记了
请以json格式输出例如
@@ -95,7 +95,7 @@ class RelationshipProcessor(BaseProcessor):
self.llm_model = LLMRequest(
model=global_config.model.relation,
max_tokens=800,
request_type="relation",
request_type="focus.relationship",
)
name = chat_manager.get_stream_name(self.subheartflow_id)
@@ -206,10 +206,10 @@ class RelationshipProcessor(BaseProcessor):
)
try:
logger.info(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
# logger.info(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
content, _ = await self.llm_model.generate_response_async(prompt=prompt)
if content:
print(f"content: {content}")
# print(f"content: {content}")
content_json = json.loads(repair_json(content))
for person_name, info_type in content_json.items():
@@ -347,7 +347,7 @@ class RelationshipProcessor(BaseProcessor):
try:
content, _ = await self.llm_model.generate_response_async(prompt=prompt)
logger.info(f"{self.log_prefix} fetch_person_info prompt: \n{prompt}\n")
# logger.info(f"{self.log_prefix} fetch_person_info prompt: \n{prompt}\n")
logger.info(f"{self.log_prefix} fetch_person_info 结果: {content}")
if content:

View File

@@ -72,7 +72,6 @@ class MemoryActivator:
self.summary_model = LLMRequest(
model=global_config.model.memory_summary,
temperature=0.7,
-max_tokens=50,
request_type="focus.memory_activator",
)
self.running_memory = []

View File

@@ -348,7 +348,6 @@ class PluginAction(BaseAction):
self,
prompt: str,
model_config: Dict[str, Any],
-max_tokens: int = 2000,
request_type: str = "plugin.generate",
**kwargs
) -> Tuple[bool, str]:
@@ -372,7 +371,6 @@ class PluginAction(BaseAction):
llm_request = LLMRequest(
model=model_config,
-max_tokens=max_tokens,
request_type=request_type,
**kwargs
)

View File

@@ -88,8 +88,7 @@ class DefaultReplyer:
# TODO: API-Adapter修改标记
self.express_model = LLMRequest(
model=global_config.model.replyer_1,
-max_tokens=256,
-request_type="focus.expressor",
+request_type="focus.replyer",
)
self.heart_fc_sender = HeartFCSender()

View File

@@ -35,7 +35,6 @@ class MemoryManager:
self.llm_summarizer = LLMRequest(
model=global_config.model.focus_working_memory,
temperature=0.3,
-max_tokens=512,
request_type="focus.processor.working_memory",
)

View File

@@ -19,19 +19,15 @@ class NormalChatGenerator:
# TODO: API-Adapter修改标记
self.model_reasoning = LLMRequest(
model=global_config.model.replyer_1,
-# temperature=0.7,
-max_tokens=3000,
request_type="normal.chat_1",
)
self.model_normal = LLMRequest(
model=global_config.model.replyer_2,
-# temperature=global_config.model.replyer_2["temp"],
-max_tokens=256,
request_type="normal.chat_2",
)
self.model_sum = LLMRequest(
-model=global_config.model.memory_summary, temperature=0.7, max_tokens=3000, request_type="relation"
+model=global_config.model.memory_summary, temperature=0.7, request_type="relation"
)
self.current_model_type = "r1" # 默认使用 R1
self.current_model_name = "unknown model"

View File

@@ -110,7 +110,6 @@ class ActionPlanner:
self.llm = LLMRequest(
model=global_config.llm_PFC_action_planner,
temperature=global_config.llm_PFC_action_planner["temp"],
-max_tokens=1500,
request_type="action_planning",
)
self.personality_info = individuality.get_prompt(x_person=2, level=3)

View File

@@ -89,7 +89,6 @@ class ReplyGenerator:
self.llm = LLMRequest(
model=global_config.llm_PFC_chat,
temperature=global_config.llm_PFC_chat["temp"],
-max_tokens=300,
request_type="reply_generation",
)
self.personality_info = individuality.get_prompt(x_person=2, level=3)

View File

@@ -56,7 +56,6 @@ class PersonInfoManager:
# TODO: API-Adapter修改标记
self.qv_name_llm = LLMRequest(
model=global_config.model.utils,
-max_tokens=256,
request_type="relation.qv_name",
)
try:

View File

@@ -128,7 +128,6 @@ class RelationshipProcessor:
self.llm_model = LLMRequest(
model=global_config.model.relation,
-max_tokens=800,
request_type="relation",
)