feat:添加了focus的logger记录,修复潜在的bad request 400,为表达器读取配置文件

This commit is contained in:
SengokuCola
2025-05-30 12:18:01 +08:00
parent 54724ae21e
commit 78df7ab553
8 changed files with 95 additions and 53 deletions

View File

@@ -41,8 +41,8 @@ def init_prompt():
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
请你根据情景使用以下句法:
{grammar_habbits}
回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,你可以完全重组回复,保留最基本的表达含义就好,但注意回复要简短,但重组后保持语意通顺。
回复不要浮夸,不要夸张修辞,平淡一些。不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
不要浮夸,不要夸张修辞,平淡不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
现在,你说:
""",
"default_expressor_prompt",
@@ -63,8 +63,8 @@ def init_prompt():
你需要使用合适的语法和句法,参考聊天内容,组织一条日常且口语化的回复。
请你根据情景使用以下句法:
{grammar_habbits}
回复尽量简短一些。可以参考贴吧,知乎和微博的回复风格,你可以完全重组回复,保留最基本的表达含义就好,但注意回复要简短,但重组后保持语意通顺。
回复不要浮夸,不要夸张修辞,平淡一些。不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
{config_expression_style},你可以完全重组回复,保留最基本的表达含义就好,但重组后保持语意通顺。
不要浮夸,不要夸张修辞,平淡不要输出多余内容(包括前后缀冒号和引号括号表情包at或 @等 ),只输出一条回复就好。
现在,你说:
""",
"default_expressor_private_prompt", # New template for private FOCUSED chat
@@ -216,6 +216,7 @@ class DefaultExpressor:
reason=reason,
sender_name=sender_name_for_prompt, # Pass determined name
target_message=target_message,
config_expression_style=global_config.expression.expression_style,
)
# 4. 调用 LLM 生成回复
@@ -230,7 +231,7 @@ class DefaultExpressor:
with Timer("LLM生成", {}): # 内部计时器,可选保留
# TODO: API-Adapter修改标记
# logger.info(f"{self.log_prefix}[Replier-{thinking_id}]\nPrompt:\n{prompt}\n")
content, reasoning_content, model_name = await self.express_model.generate_response(prompt)
content, (reasoning_content, model_name) = await self.express_model.generate_response_async(prompt)
# logger.info(f"{self.log_prefix}\nPrompt:\n{prompt}\n---------------------------\n")
@@ -275,6 +276,7 @@ class DefaultExpressor:
sender_name,
in_mind_reply,
target_message,
config_expression_style,
) -> str:
is_group_chat = bool(chat_stream.group_info)
@@ -343,6 +345,7 @@ class DefaultExpressor:
reason=reason,
in_mind_reply=in_mind_reply,
target_message=target_message,
config_expression_style=config_expression_style,
)
else: # Private chat
template_name = "default_expressor_private_prompt"
@@ -358,6 +361,7 @@ class DefaultExpressor:
reason=reason,
in_mind_reply=in_mind_reply,
target_message=target_message,
config_expression_style=config_expression_style,
)
return prompt

View File

@@ -2,12 +2,14 @@ import time
import os
from typing import List, Optional, Dict, Any
log_dir = "log/log_cycle_debug/"
class CycleDetail:
"""循环信息记录类"""
def __init__(self, cycle_id: int):
self.cycle_id = cycle_id
self.prefix = ""
self.thinking_id = ""
self.start_time = time.time()
self.end_time: Optional[float] = None
@@ -21,21 +23,71 @@ class CycleDetail:
def to_dict(self) -> Dict[str, Any]:
    """Serialize the cycle into a JSON-friendly dict.

    Nested loop info is passed through convert_to_serializable so that
    arbitrary objects, reference cycles, and deep nesting cannot break
    a later json.dumps call.
    """

    def convert_to_serializable(obj, depth=0, seen=None):
        # `seen` tracks visited object ids to break reference cycles.
        if seen is None:
            seen = set()
        # Cap recursion depth; anything deeper degrades to str().
        if depth > 5:
            return str(obj)
        obj_id = id(obj)
        if obj_id in seen:
            return str(obj)
        seen.add(obj_id)
        try:
            if hasattr(obj, 'to_dict'):
                # Objects exposing to_dict() serialize themselves.
                return obj.to_dict()
            elif isinstance(obj, dict):
                # Keep only basic-typed keys; convert values recursively.
                return {k: convert_to_serializable(v, depth + 1, seen)
                        for k, v in obj.items()
                        if isinstance(k, (str, int, float, bool))}
            elif isinstance(obj, (list, tuple)):
                # Convert every element recursively. (Fix: the previous
                # filter dropped nested dict/list/tuple elements even
                # though the recursive call handles them correctly.)
                return [convert_to_serializable(item, depth + 1, seen)
                        for item in obj]
            elif isinstance(obj, (str, int, float, bool, type(None))):
                return obj
            else:
                return str(obj)
        finally:
            # Allow the same object to reappear on a sibling branch.
            seen.remove(obj_id)

    return {
        "cycle_id": self.cycle_id,
        "start_time": self.start_time,
        "end_time": self.end_time,
        "timers": self.timers,
        "thinking_id": self.thinking_id,
        "loop_observation_info": convert_to_serializable(self.loop_observation_info),
        "loop_process_info": convert_to_serializable(self.loop_process_info),
        "loop_plan_info": convert_to_serializable(self.loop_plan_info),
        "loop_action_info": convert_to_serializable(self.loop_action_info),
    }
def complete_cycle(self):
    """Mark this cycle as finished and persist it to a per-minute JSON log file."""
    self.end_time = time.time()
    stamp = time.strftime("%Y%m%d_%H%M", time.localtime())
    # File name encodes the minute and the cycle id under the stream's prefix dir.
    file_path = f"{log_dir}{self.prefix}/{stamp}_cycle_{self.cycle_id}.json"
    self.log_cycle_to_file(file_path)
def log_cycle_to_file(self, file_path: str):
    """Append this cycle, serialized as one JSON line, to file_path."""
    import json

    # Create the parent directory on demand.
    parent_dir = os.path.dirname(file_path)
    if parent_dir and not os.path.exists(parent_dir):
        os.makedirs(parent_dir, exist_ok=True)
    record = json.dumps(self.to_dict(), ensure_ascii=False)
    with open(file_path, "a", encoding="utf-8") as fh:
        fh.write(record + "\n")
def set_thinking_id(self, thinking_id: str):
"""设置思考消息ID"""
@@ -47,30 +99,3 @@ class CycleDetail:
self.loop_processor_info = loop_info["loop_processor_info"]
self.loop_plan_info = loop_info["loop_plan_info"]
self.loop_action_info = loop_info["loop_action_info"]
@staticmethod
def list_cycles(stream_id: str, base_dir: str = "log_debug") -> List[str]:
"""
列出指定stream_id的所有循环文件
参数:
stream_id: 聊天流ID
base_dir: 基础目录默认为log_debug
返回:
List[str]: 文件路径列表
"""
try:
stream_dir = os.path.join(base_dir, stream_id)
if not os.path.exists(stream_dir):
return []
files = [
os.path.join(stream_dir, f)
for f in os.listdir(stream_dir)
if f.startswith("cycle_") and f.endswith(".txt")
]
return sorted(files)
except Exception as e:
print(f"列出循环文件时出错: {e}")
return []

View File

@@ -104,7 +104,7 @@ class ChattingInfoProcessor(BaseProcessor):
if obs.compressor_prompt:
summary = ""
try:
summary_result, _, _ = await self.model_summary.generate_response(obs.compressor_prompt)
summary_result, _ = await self.model_summary.generate_response_async(obs.compressor_prompt)
summary = "没有主题的闲聊"
if summary_result:
summary = summary_result

View File

@@ -71,7 +71,7 @@ class MindProcessor(BaseProcessor):
self.llm_model = LLMRequest(
model=global_config.model.focus_chat_mind,
temperature=global_config.model.focus_chat_mind["temp"],
# temperature=global_config.model.focus_chat_mind["temp"],
max_tokens=800,
request_type="focus.processor.chat_mind",
)
@@ -225,7 +225,7 @@ class MindProcessor(BaseProcessor):
# 处理总体异常
logger.error(f"{self.log_prefix} 执行LLM请求或处理响应时出错: {e}")
logger.error(traceback.format_exc())
content = "思考过程中出现错误"
content = "注意:思考过程中出现错误应该是LLM大模型有问题你需要告诉别人检查大模型配置"
# 记录初步思考结果
logger.debug(f"{self.log_prefix} 思考prompt: \n{prompt}\n")

View File

@@ -15,6 +15,7 @@ from src.common.logger_manager import get_logger
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.individuality.individuality import individuality
from src.chat.focus_chat.planners.action_manager import ActionManager
from json_repair import repair_json
logger = get_logger("planner")
@@ -174,7 +175,9 @@ class ActionPlanner:
# --- 调用 LLM (普通文本生成) ---
llm_content = None
try:
llm_content, reasoning_content, _ = await self.planner_llm.generate_response(prompt=prompt)
prompt = f"{prompt}"
print(len(prompt))
llm_content, (reasoning_content, _) = await self.planner_llm.generate_response_async(prompt=prompt)
logger.debug(f"{self.log_prefix}[Planner] LLM 原始 JSON 响应 (预期): {llm_content}")
logger.debug(f"{self.log_prefix}[Planner] LLM 原始理由 响应 (预期): {reasoning_content}")
except Exception as req_e:
@@ -184,13 +187,16 @@ class ActionPlanner:
if llm_content:
try:
# 尝试去除可能的 markdown 代码块标记
cleaned_content = (
llm_content.strip().removeprefix("```json").removeprefix("```").removesuffix("```").strip()
)
if not cleaned_content:
raise json.JSONDecodeError("Cleaned content is empty", cleaned_content, 0)
parsed_json = json.loads(cleaned_content)
fixed_json_string = repair_json(llm_content)
if isinstance(fixed_json_string, str):
try:
parsed_json = json.loads(fixed_json_string)
except json.JSONDecodeError as decode_error:
logger.error(f"JSON解析错误: {str(decode_error)}")
parsed_json = {}
else:
# 如果repair_json直接返回了字典对象直接使用
parsed_json = fixed_json_string
# 提取决策,提供默认值
extracted_action = parsed_json.get("action", "no_reply")
@@ -244,6 +250,7 @@ class ActionPlanner:
"action_result": action_result,
"current_mind": current_mind,
"observed_messages": observed_messages,
"action_prompt": prompt,
}
return plan_result

View File

@@ -102,9 +102,11 @@ class HFCloopObservation:
def to_dict(self) -> dict:
    """Serialize the observation into a plain, JSON-safe dict.

    Only basic fields are included. history_loop itself is deliberately
    NOT serialized (only its length) to avoid circular references back
    into the cycle objects.
    """
    return {
        "observe_info": self.observe_info,
        "observe_id": self.observe_id,
        "last_observe_time": self.last_observe_time,
        # Do not serialize history_loop itself — avoids circular references.
        "history_loop_count": len(self.history_loop),
    }

View File

@@ -34,13 +34,12 @@ class PersonalityExpression:
def __init__(self):
    """Initialize the expression learner model and its on-disk storage paths."""
    # LLM used to learn personality expression style; temperature is left at
    # the model default (the explicit 0.1 override was removed in this change).
    self.express_learn_model: LLMRequest = LLMRequest(
        model=global_config.model.focus_expressor,
        max_tokens=512,
        request_type="expressor.learner",
    )
    # Metadata about the learned expression style.
    self.meta_file_path = os.path.join("data", "expression", "personality", "expression_style_meta.json")
    # Learned expressions themselves.
    self.expressions_file_path = os.path.join("data", "expression", "personality", "expressions.json")
    # Upper bound on learning calculation passes.
    self.max_calculations = 10
def _read_meta_data(self):
if os.path.exists(self.meta_file_path):

View File

@@ -753,8 +753,13 @@ class LLMRequest:
response = await self._execute_request(endpoint="/chat/completions", payload=data, prompt=prompt)
# 原样返回响应,不做处理
return response
if len(response) == 3:
content, reasoning_content, tool_calls = response
return content, (reasoning_content, self.model_name, tool_calls)
else:
content, reasoning_content = response
return content, (reasoning_content, self.model_name)
async def generate_response_tool_async(self, prompt: str, tools: list, **kwargs) -> tuple[str, str, list]:
"""异步方式根据输入的提示生成模型的响应"""