fix: removed some token limits
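Taken together, the hunks below follow one pattern: hard-coded max_tokens arguments are dropped from LLMRequest constructions (and from the PluginAction generation helper), so output length is no longer capped at each call site. A minimal before/after sketch of that pattern, assuming LLMRequest treats max_tokens as an optional keyword and falls back to the model/provider default when it is omitted (that fallback is not shown in this diff):

    # Before this commit: each call site pinned its own output cap.
    self.express_model = LLMRequest(
        model=global_config.model.replyer_1,
        max_tokens=256,                      # per-site cap on generated tokens
        request_type="focus.expressor",
    )

    # After this commit: no per-site cap; the effective limit is whatever
    # LLMRequest / the model configuration defaults to when max_tokens is
    # omitted (assumption, not visible in this diff).
    self.express_model = LLMRequest(
        model=global_config.model.replyer_1,
        request_type="focus.expressor",
    )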
@@ -77,7 +77,6 @@ class DefaultExpressor:
 # TODO: API-Adapter修改标记
 self.express_model = LLMRequest(
     model=global_config.model.replyer_1,
-    max_tokens=256,
     request_type="focus.expressor",
 )
 self.heart_fc_sender = HeartFCSender()
@@ -70,7 +70,6 @@ class ExpressionLearner:
 self.express_learn_model: LLMRequest = LLMRequest(
     model=global_config.model.replyer_1,
     temperature=0.1,
-    max_tokens=256,
     request_type="expressor.learner",
 )
 
@@ -31,7 +31,6 @@ class ChattingInfoProcessor(BaseProcessor):
 self.model_summary = LLMRequest(
     model=global_config.model.utils_small,
     temperature=0.7,
-    max_tokens=300,
     request_type="focus.observation.chat",
 )
 
@@ -69,7 +69,6 @@ class MindProcessor(BaseProcessor):
 
 self.llm_model = LLMRequest(
     model=global_config.model.planner,
-    max_tokens=800,
     request_type="focus.processor.chat_mind",
 )
 
@@ -37,7 +37,7 @@ def init_prompt():
 你不同程度上认识群聊里的人,你可以根据聊天记录,回忆起有关他们的信息,帮助你参与聊天
 1.你需要提供用户名,以及你想要提取的信息名称类型来进行调取
 2.你也可以完全不输出任何信息
-3.如果短期内已经回忆过某个人的信息,请不要重复调取,除非你忘记了
+3.阅读调取记录,如果已经回忆过某个人的信息,请不要重复调取,除非你忘记了
 
 请以json格式输出,例如:
 
@@ -95,7 +95,7 @@ class RelationshipProcessor(BaseProcessor):
 self.llm_model = LLMRequest(
     model=global_config.model.relation,
     max_tokens=800,
-    request_type="relation",
+    request_type="focus.relationship",
 )
 
 name = chat_manager.get_stream_name(self.subheartflow_id)
@@ -206,10 +206,10 @@ class RelationshipProcessor(BaseProcessor):
 )
 
 try:
-    logger.info(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
+    # logger.info(f"{self.log_prefix} 人物信息prompt: \n{prompt}\n")
     content, _ = await self.llm_model.generate_response_async(prompt=prompt)
     if content:
-        print(f"content: {content}")
+        # print(f"content: {content}")
         content_json = json.loads(repair_json(content))
 
         for person_name, info_type in content_json.items():
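The parsing path kept in this hunk routes the raw model reply through repair_json before json.loads, the usual trick for tolerating the slightly malformed JSON that LLMs tend to return (trailing commas, stray text, unquoted keys). A small self-contained sketch of that pattern, assuming the repair_json used above is the function from the json_repair package, which returns a best-effort valid JSON string:

    import json

    from json_repair import repair_json  # assumption: this is the repair_json used above

    raw = '{"张三": "昵称",}'  # e.g. an LLM reply with a trailing comma
    content_json = json.loads(repair_json(raw))

    for person_name, info_type in content_json.items():
        print(person_name, info_type)  # -> 张三 昵称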
@@ -347,7 +347,7 @@ class RelationshipProcessor(BaseProcessor):
 try:
     content, _ = await self.llm_model.generate_response_async(prompt=prompt)
 
-    logger.info(f"{self.log_prefix} fetch_person_info prompt: \n{prompt}\n")
+    # logger.info(f"{self.log_prefix} fetch_person_info prompt: \n{prompt}\n")
     logger.info(f"{self.log_prefix} fetch_person_info 结果: {content}")
 
     if content:
@@ -72,7 +72,6 @@ class MemoryActivator:
 self.summary_model = LLMRequest(
     model=global_config.model.memory_summary,
     temperature=0.7,
-    max_tokens=50,
     request_type="focus.memory_activator",
 )
 self.running_memory = []
@@ -348,7 +348,6 @@ class PluginAction(BaseAction):
     self,
     prompt: str,
     model_config: Dict[str, Any],
-    max_tokens: int = 2000,
     request_type: str = "plugin.generate",
     **kwargs
 ) -> Tuple[bool, str]:
@@ -372,7 +371,6 @@ class PluginAction(BaseAction):
 
 llm_request = LLMRequest(
     model=model_config,
-    max_tokens=max_tokens,
     request_type=request_type,
     **kwargs
 )
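With the explicit max_tokens parameter gone from this helper's signature, no length cap is applied by default; because the helper still forwards **kwargs into LLMRequest, a plugin that wants a cap can presumably still pass one through the keyword arguments (assuming LLMRequest keeps accepting max_tokens as an optional keyword, as the removed code shows it did). A hypothetical call, with generate_with_model standing in for the real method name, which this hunk does not show:

    # Hypothetical plugin code; "generate_with_model" is a stand-in name.
    ok, text = await self.generate_with_model(
        prompt="用一句话总结上面的聊天",
        model_config=global_config.model.utils,
        request_type="plugin.summary",
        max_tokens=512,  # assumption: captured by **kwargs and forwarded to LLMRequest
    )
    if ok:
        logger.info(f"插件生成结果: {text}")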
@@ -88,8 +88,7 @@ class DefaultReplyer:
 # TODO: API-Adapter修改标记
 self.express_model = LLMRequest(
     model=global_config.model.replyer_1,
-    max_tokens=256,
-    request_type="focus.expressor",
+    request_type="focus.replyer",
 )
 self.heart_fc_sender = HeartFCSender()
 
@@ -35,7 +35,6 @@ class MemoryManager:
 self.llm_summarizer = LLMRequest(
     model=global_config.model.focus_working_memory,
     temperature=0.3,
-    max_tokens=512,
     request_type="focus.processor.working_memory",
 )
 
@@ -19,19 +19,15 @@ class NormalChatGenerator:
 # TODO: API-Adapter修改标记
 self.model_reasoning = LLMRequest(
     model=global_config.model.replyer_1,
-    # temperature=0.7,
-    max_tokens=3000,
     request_type="normal.chat_1",
 )
 self.model_normal = LLMRequest(
     model=global_config.model.replyer_2,
-    # temperature=global_config.model.replyer_2["temp"],
-    max_tokens=256,
     request_type="normal.chat_2",
 )
 
 self.model_sum = LLMRequest(
-    model=global_config.model.memory_summary, temperature=0.7, max_tokens=3000, request_type="relation"
+    model=global_config.model.memory_summary, temperature=0.7, request_type="relation"
 )
 self.current_model_type = "r1"  # 默认使用 R1
 self.current_model_name = "unknown model"
@@ -110,7 +110,6 @@ class ActionPlanner:
 self.llm = LLMRequest(
     model=global_config.llm_PFC_action_planner,
     temperature=global_config.llm_PFC_action_planner["temp"],
-    max_tokens=1500,
     request_type="action_planning",
 )
 self.personality_info = individuality.get_prompt(x_person=2, level=3)
@@ -89,7 +89,6 @@ class ReplyGenerator:
 self.llm = LLMRequest(
     model=global_config.llm_PFC_chat,
     temperature=global_config.llm_PFC_chat["temp"],
-    max_tokens=300,
     request_type="reply_generation",
 )
 self.personality_info = individuality.get_prompt(x_person=2, level=3)
@@ -56,7 +56,6 @@ class PersonInfoManager:
 # TODO: API-Adapter修改标记
 self.qv_name_llm = LLMRequest(
     model=global_config.model.utils,
-    max_tokens=256,
     request_type="relation.qv_name",
 )
 try:
@@ -128,7 +128,6 @@ class RelationshipProcessor:
 
 self.llm_model = LLMRequest(
     model=global_config.model.relation,
-    max_tokens=800,
     request_type="relation",
 )
 