diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py index e3525b3bb..398cb37e3 100644 --- a/src/plugins/chat/bot.py +++ b/src/plugins/chat/bot.py @@ -119,7 +119,7 @@ class ChatBot: willing_manager.change_reply_willing_sent(thinking_message.group_id) - response, emotion = await self.gpt.generate_response(message) + response, raw_content = await self.gpt.generate_response(message) # if response is None: # thinking_message.interupt=True @@ -171,6 +171,13 @@ class ChatBot: message_manager.add_message(message_set) bot_response_time = tinking_time_point + emotion = await self.gpt._get_emotion_tags(raw_content) + print(f"为 '{response}' 获取到的情感标签为:{emotion}") + valuedict = { + 'happy':0.5,'angry':-1,'sad':-0.5,'surprised':0.5,'disgusted':-1.5,'fearful':-0.25,'neutral':0.25 + } + await relationship_manager.update_relationship_value(message.user_id, relationship_value=(valuedict.get(emotion[0], 0) if emotion else 0)) + if random() < global_config.emoji_chance: emoji_path = await emoji_manager.get_emoji_for_emotion(emotion) if emoji_path: diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py index 04f2e73ad..ab0f4e12c 100644 --- a/src/plugins/chat/llm_generator.py +++ b/src/plugins/chat/llm_generator.py @@ -44,19 +44,15 @@ class ResponseGenerator: print(f"+++++++++++++++++{global_config.BOT_NICKNAME}{self.current_model_type}思考中+++++++++++++++++") model_response = await self._generate_response_with_model(message, current_model) + raw_content = model_response if model_response: print(f'{global_config.BOT_NICKNAME}的回复是:{model_response}') - model_response, emotion = await self._process_response(model_response) + model_response = await self._process_response(model_response) if model_response: - print(f"为 '{model_response}' 获取到的情感标签为:{emotion}") - valuedict={ - 'happy':0.5,'angry':-1,'sad':-0.5,'surprised':0.5,'disgusted':-1.5,'fearful':-0.25,'neutral':0.25 - } - await relationship_manager.update_relationship_value(message.user_id, relationship_value=valuedict[emotion[0]]) - 
return model_response, emotion - return None, [] + return model_response, raw_content + return None, raw_content async def _generate_response_with_model(self, message: Message, model: LLM_request) -> Optional[str]: """使用指定的模型生成回复""" @@ -158,10 +154,9 @@ class ResponseGenerator: if not content: return None, [] - emotion_tags = await self._get_emotion_tags(content) processed_response = process_llm_response(content) - return processed_response, emotion_tags + return processed_response class InitiativeMessageGenerate: diff --git a/src/plugins/models/utils_model.py b/src/plugins/models/utils_model.py index 0717bc3a7..57a0acb55 100644 --- a/src/plugins/models/utils_model.py +++ b/src/plugins/models/utils_model.py @@ -41,7 +41,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" - logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL + logger.info(f"发送请求到URL: {api_url} {self.model_name}") # 记录请求的URL max_retries = 3 base_wait_time = 15 @@ -122,7 +122,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" - logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL + logger.info(f"发送请求到URL: {api_url} {self.model_name}") # 记录请求的URL max_retries = 3 base_wait_time = 15 @@ -270,7 +270,7 @@ class LLM_request: # 发送请求到完整的chat/completions端点 api_url = f"{self.base_url.rstrip('/')}/chat/completions" - logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL + logger.info(f"发送请求到URL: {api_url} {self.model_name}") # 记录请求的URL max_retries = 2 base_wait_time = 6 @@ -335,7 +335,7 @@ class LLM_request: } api_url = f"{self.base_url.rstrip('/')}/embeddings" - logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL + logger.info(f"发送请求到URL: {api_url} {self.model_name}") # 记录请求的URL max_retries = 2 base_wait_time = 6 @@ -391,7 +391,7 @@ class LLM_request: } api_url = f"{self.base_url.rstrip('/')}/embeddings" - logger.info(f"发送请求到URL: {api_url}") # 记录请求的URL + logger.info(f"发送请求到URL: {api_url} {self.model_name}") # 记录请求的URL 
max_retries = 3 base_wait_time = 15