diff --git a/src/plugins/chat/emoji_manager.py b/src/plugins/chat/emoji_manager.py index f2bee4fb5..e7ff85803 100644 --- a/src/plugins/chat/emoji_manager.py +++ b/src/plugins/chat/emoji_manager.py @@ -194,7 +194,19 @@ class EmojiManager: async def _get_emoji_discription(self, image_base64: str) -> str: """获取表情包的标签""" try: - prompt = '这是一个表情包,简洁的描述一下表情包的内容和表情包所表达的情感' + prompt = '这是一个表情包,使用中文简洁的描述一下表情包的内容和表情包所表达的情感' + + content, _ = await self.llm.generate_response_for_image(prompt, image_base64) + logger.debug(f"输出描述: {content}") + return content + + except Exception as e: + logger.error(f"获取标签失败: {str(e)}") + return None + + async def _check_emoji(self, image_base64: str) -> str: + try: + prompt = '这是一个表情包,请回答这个表情包是否满足\"动漫风格,画风可爱\"的要求,是则回答是,否则回答否,不要出现任何其他内容' content, _ = await self.llm.generate_response_for_image(prompt, image_base64) logger.debug(f"输出描述: {content}") @@ -208,7 +220,7 @@ class EmojiManager: try: prompt = f'这是{global_config.BOT_NICKNAME}将要发送的消息内容:\n{text}\n若要为其配上表情包,请你输出这个表情包应该表达怎样的情感,应该给人什么样的感觉,不要太简洁也不要太长,注意不要输出任何对内容的分析内容,只输出\"一种什么样的感觉\"中间的形容词部分。' - content, _ = await self.llm.generate_response_async(prompt) + content, _ = await self.llm.generate_response_async(prompt) logger.info(f"输出描述: {content}") return content @@ -310,6 +322,13 @@ class EmojiManager: # 获取表情包的描述 discription = await self._get_emoji_discription(image_base64) + check = await self._check_emoji(image_base64) + if '是' not in check: + os.remove(image_path) + logger.info(f"描述: {discription}") + logger.info(f"其不满足过滤规则,被剔除 {check}") + continue + logger.info(f"check通过 {check}") tag = await self._get_emoji_tag(image_base64) embedding = get_embedding(discription) if discription is not None: diff --git a/src/plugins/chat/llm_generator.py b/src/plugins/chat/llm_generator.py index ab0f4e12c..a2f981c9e 100644 --- a/src/plugins/chat/llm_generator.py +++ b/src/plugins/chat/llm_generator.py @@ -24,6 +24,7 @@ class ResponseGenerator: self.model_r1 = LLM_request(model=global_config.llm_reasoning,
temperature=0.7,max_tokens=1000) self.model_v3 = LLM_request(model=global_config.llm_normal, temperature=0.7,max_tokens=1000) self.model_r1_distill = LLM_request(model=global_config.llm_reasoning_minor, temperature=0.7,max_tokens=1000) + self.model_v25 = LLM_request(model=global_config.llm_normal_minor, temperature=0.7,max_tokens=1000) self.db = Database.get_instance() self.current_model_type = 'r1' # 默认使用 R1 @@ -138,7 +139,7 @@ class ResponseGenerator: 内容:{content} 输出: ''' - content, _ = await self.model_v3.generate_response(prompt) + content, _ = await self.model_v25.generate_response(prompt) content=content.strip() if content in ['happy','angry','sad','surprised','disgusted','fearful','neutral']: return [content]