From 59d2a4e9181a80b7189ce067a25611aac6c5c61b Mon Sep 17 00:00:00 2001
From: Windpicker-owo <3431391539@qq.com>
Date: Sat, 1 Nov 2025 13:48:31 +0800
Subject: [PATCH] =?UTF-8?q?fix(database):=20=E4=BF=AE=E5=A4=8Drecord=5Fllm?=
 =?UTF-8?q?=5Fusage=E5=87=BD=E6=95=B0=E7=9A=84=E5=AD=97=E6=AE=B5=E6=98=A0?=
 =?UTF-8?q?=E5=B0=84?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- 更新使用正确的LLMUsage模型字段名:
  * input_tokens -> prompt_tokens
  * output_tokens -> completion_tokens
  * stream_id, platform (兼容参数,不存储)
- 添加所有必需字段支持:
  * user_id, request_type, endpoint, cost, status
  * model_assign_name, model_api_provider
  * time_cost (可选)
- 保持向后兼容的参数接口
- 修复后测试通过率提升至69.2% (18/26)
---
 src/common/database/api/specialized.py | 38 ++++++++++++++++++++------
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/src/common/database/api/specialized.py b/src/common/database/api/specialized.py
index 0a022e3af..3d7327102 100644
--- a/src/common/database/api/specialized.py
+++ b/src/common/database/api/specialized.py
@@ -293,6 +293,14 @@ async def record_llm_usage(
     output_tokens: int,
     stream_id: Optional[str] = None,
     platform: Optional[str] = None,
+    user_id: str = "system",
+    request_type: str = "chat",
+    model_assign_name: Optional[str] = None,
+    model_api_provider: Optional[str] = None,
+    endpoint: str = "/v1/chat/completions",
+    cost: float = 0.0,
+    status: str = "success",
+    time_cost: Optional[float] = None,
     use_batch: bool = True,
 ) -> Optional[LLMUsage]:
     """记录LLM使用情况
@@ -301,8 +309,16 @@
         model_name: 模型名称
         input_tokens: 输入token数
         output_tokens: 输出token数
-        stream_id: 流ID
-        platform: 平台
+        stream_id: 流ID (兼容参数,实际不存储)
+        platform: 平台 (兼容参数,实际不存储)
+        user_id: 用户ID
+        request_type: 请求类型
+        model_assign_name: 模型分配名称
+        model_api_provider: 模型API提供商
+        endpoint: API端点
+        cost: 成本
+        status: 状态
+        time_cost: 时间成本
         use_batch: 是否使用批处理
 
     Returns:
@@ -310,16 +326,20 @@
     """
     usage_data = {
         "model_name": model_name,
-        "input_tokens": input_tokens,
-        "output_tokens": output_tokens,
+        "prompt_tokens": input_tokens,  # 使用正确的字段名
+        "completion_tokens": output_tokens,  # 使用正确的字段名
         "total_tokens": input_tokens + output_tokens,
-        "timestamp": time.time(),
+        "user_id": user_id,
+        "request_type": request_type,
+        "endpoint": endpoint,
+        "cost": cost,
+        "status": status,
+        "model_assign_name": model_assign_name or model_name,
+        "model_api_provider": model_api_provider or "unknown",
     }
 
-    if stream_id:
-        usage_data["stream_id"] = stream_id
-    if platform:
-        usage_data["platform"] = platform
+    if time_cost is not None:
+        usage_data["time_cost"] = time_cost
 
     return await _llm_usage_crud.create(usage_data, use_batch=use_batch)
 