修改统计名称

This commit is contained in:
SengokuCola
2025-05-29 10:20:58 +08:00
parent 978147036a
commit 8ba69ad695
12 changed files with 12 additions and 12 deletions

View File

@@ -79,7 +79,7 @@ class DefaultExpressor:
 model=global_config.model.focus_expressor,
 # temperature=global_config.model.focus_expressor["temp"],
 max_tokens=256,
-request_type="focus_expressor",
+request_type="focus.expressor",
 )
 self.heart_fc_sender = HeartFCSender()

View File

@@ -64,7 +64,7 @@ class ExpressionLearner:
 model=global_config.model.focus_expressor,
 temperature=0.1,
 max_tokens=256,
-request_type="learn_expression",
+request_type="expressor.learner",
 )
 async def get_expression_by_chat_id(self, chat_id: str) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:

View File

@@ -28,7 +28,7 @@ class ChattingInfoProcessor(BaseProcessor):
 super().__init__()
 # TODO: API-Adapter修改标记
 self.model_summary = LLMRequest(
-    model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="chat_observation"
+    model=global_config.model.utils_small, temperature=0.7, max_tokens=300, request_type="focus.observation.chat"
 )
 async def process_info(

View File

@@ -79,7 +79,7 @@ class MindProcessor(BaseProcessor):
 model=global_config.model.focus_chat_mind,
 temperature=global_config.model.focus_chat_mind["temp"],
 max_tokens=800,
-request_type="focus_chat_mind",
+request_type="focus.processor.chat_mind",
 )
 self.current_mind = ""

View File

@@ -58,7 +58,7 @@ class SelfProcessor(BaseProcessor):
 model=global_config.model.focus_self_recognize,
 temperature=global_config.model.focus_self_recognize["temp"],
 max_tokens=800,
-request_type="focus_self_identify",
+request_type="focus.processor.self_identify",
 )
 name = chat_manager.get_stream_name(self.subheartflow_id)

View File

@@ -51,7 +51,7 @@ class ToolProcessor(BaseProcessor):
 self.llm_model = LLMRequest(
 model=global_config.model.focus_tool_use,
 max_tokens=500,
-request_type="focus_tool",
+request_type="focus.processor.tool",
 )
 self.structured_info = []

View File

@@ -64,7 +64,7 @@ class WorkingMemoryProcessor(BaseProcessor):
 model=global_config.model.focus_chat_mind,
 temperature=global_config.model.focus_chat_mind["temp"],
 max_tokens=800,
-request_type="focus_working_memory",
+request_type="focus.processor.working_memory",
 )
 name = chat_manager.get_stream_name(self.subheartflow_id)

View File

@@ -70,7 +70,7 @@ class MemoryActivator:
 def __init__(self):
 # TODO: API-Adapter修改标记
 self.summary_model = LLMRequest(
-    model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="chat_observation"
+    model=global_config.model.memory_summary, temperature=0.7, max_tokens=50, request_type="focus.memory_activator"
 )
 self.running_memory = []
 self.cached_keywords = set()  # 用于缓存历史关键词

View File

@@ -79,7 +79,7 @@ class ActionPlanner:
 self.planner_llm = LLMRequest(
 model=global_config.model.focus_planner,
 max_tokens=1000,
-request_type="focus_planner",  # 用于动作规划
+request_type="focus.planner",  # 用于动作规划
 )
 self.action_manager = action_manager

View File

@@ -36,7 +36,7 @@ class MemoryManager:
 model=global_config.model.focus_working_memory,
 temperature=0.3,
 max_tokens=512,
-request_type="memory_summarization",
+request_type="focus.processor.working_memory",
 )
 @property

View File

@@ -36,7 +36,7 @@ class PersonalityExpression:
 model=global_config.model.focus_expressor,
 temperature=0.1,
 max_tokens=256,
-request_type="learn_expression",
+request_type="expressor.learner",
 )
 self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json")
 self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json")

View File

@@ -60,7 +60,7 @@ class PersonInfoManager:
 self.qv_name_llm = LLMRequest(
 model=global_config.model.utils,
 max_tokens=256,
-request_type="qv_name",
+request_type="relation.qv_name",
 )
 try:
 db.connect(reuse_if_open=True)