diff --git a/src/chat/utils/utils_voice.py b/src/chat/utils/utils_voice.py
index 70e5d4fb9..49ec10794 100644
--- a/src/chat/utils/utils_voice.py
+++ b/src/chat/utils/utils_voice.py
@@ -15,7 +15,7 @@ async def get_voice_text(voice_base64: str) -> str:
         logger.warning("语音识别未启用,无法处理语音消息")
         return "[语音]"
     try:
-        _llm = LLMRequest(model_set=model_config.model_task_config.voice, request_type="voice")
+        _llm = LLMRequest(model_set=model_config.model_task_config.voice, request_type="audio")
         text = await _llm.generate_response_for_voice(voice_base64)
         if text is None:
             logger.warning("未能生成语音文本")
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 329e8f0ba..0a1284ccc 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -277,7 +277,7 @@ class LLMRequest:
                 extra_params=model_info.extra_params,
             )
         elif request_type == RequestType.AUDIO:
-            assert message_list is not None, "message_list cannot be None for audio requests"
+            assert audio_base64 is not None, "audio_base64 cannot be None for audio requests"
             return await client.get_audio_transcriptions(
                 model_info=model_info,
                 audio_base64=audio_base64,