diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index f60f8c205..49c83c135 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -961,6 +961,7 @@ class LLMRequest:
         )

         await self._record_usage(model_info, response.usage, time.time() - start_time, "/chat/completions")
+        logger.debug(f"LLM原始响应: {response.content}")

         if not response.content and not response.tool_calls:
             if raise_when_empty:
diff --git a/src/plugins/built_in/affinity_flow_chatter/plan_filter.py b/src/plugins/built_in/affinity_flow_chatter/plan_filter.py
index 6b97c056b..229860cf1 100644
--- a/src/plugins/built_in/affinity_flow_chatter/plan_filter.py
+++ b/src/plugins/built_in/affinity_flow_chatter/plan_filter.py
@@ -60,13 +60,13 @@ class ChatterPlanFilter:
         prompt, used_message_id_list = await self._build_prompt(plan)
         plan.llm_prompt = prompt
         if global_config.debug.show_prompt:
-            logger.info(f"规划器原始提示词:{prompt}")
+            logger.debug(f"规划器原始提示词:{prompt}")

         llm_content, _ = await self.planner_llm.generate_response_async(prompt=prompt)

         if llm_content:
             if global_config.debug.show_prompt:
-                logger.info(f"LLM规划器原始响应:{llm_content}")
+                logger.debug(f"LLM规划器原始响应:{llm_content}")
             try:
                 parsed_json = orjson.loads(repair_json(llm_content))
             except orjson.JSONDecodeError:
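
Note for reviewers: the three log messages translate as "raw LLM response", "planner raw prompt", and "raw LLM planner response". The net effect of this patch is that raw prompts and raw model output are emitted at DEBUG rather than INFO, so they disappear from default log output. A minimal sketch of how one might surface them again during local debugging, assuming the project's `logger` objects route through Python's standard `logging` module and use module-path logger names (both assumptions; if the repo wraps loguru or a custom handler, the equivalent knob will differ):

    import logging

    # Root stays at INFO so the rest of the application is not flooded.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )

    # Opt in only the two modules touched by this diff. Loggers with an
    # explicitly set DEBUG level still emit DEBUG records through the
    # root handler even though the root logger itself is at INFO.
    for name in (
        "src.llm_models.utils_model",
        "src.plugins.built_in.affinity_flow_chatter.plan_filter",
    ):
        logging.getLogger(name).setLevel(logging.DEBUG)

Note also that the new `logger.debug` in utils_model.py is unconditional, whereas both downgraded calls in plan_filter.py remain guarded by `global_config.debug.show_prompt`; with the snippet above, the utils_model message will therefore appear regardless of that config flag.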