Fix various minor issues
@@ -8,7 +8,7 @@ from ...config.config import global_config
 from ...chat.emoji_manager import emoji_manager
 from .reasoning_generator import ResponseGenerator
 from ...chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ...chat.message_sender import message_manager
+from ...chat.messagesender import message_manager
 from ...storage.storage import MessageStorage
 from ...chat.utils import is_mentioned_bot_in_message
 from ...chat.utils_image import image_path_to_base64
@@ -1,7 +1,7 @@
 from typing import List, Optional, Tuple, Union
 import random
 
-from ...models.utils_model import LLM_request
+from ...models.utils_model import LLMRequest
 from ...config.config import global_config
 from ...chat.message import MessageThinking
 from .reasoning_prompt_builder import prompt_builder
@@ -22,20 +22,20 @@ logger = get_module_logger("llm_generator", config=llm_config)
 
 class ResponseGenerator:
     def __init__(self):
-        self.model_reasoning = LLM_request(
+        self.model_reasoning = LLMRequest(
             model=global_config.llm_reasoning,
             temperature=0.7,
             max_tokens=3000,
             request_type="response_reasoning",
         )
-        self.model_normal = LLM_request(
+        self.model_normal = LLMRequest(
             model=global_config.llm_normal,
             temperature=global_config.llm_normal["temp"],
             max_tokens=256,
             request_type="response_reasoning",
         )
 
-        self.model_sum = LLM_request(
+        self.model_sum = LLMRequest(
             model=global_config.llm_summary_by_topic, temperature=0.7, max_tokens=3000, request_type="relation"
         )
         self.current_model_type = "r1"  # 默认使用 R1
@@ -68,7 +68,7 @@ class ResponseGenerator:
             logger.info(f"{self.current_model_type}思考,失败")
             return None
 
-    async def _generate_response_with_model(self, message: MessageThinking, model: LLM_request, thinking_id: str):
+    async def _generate_response_with_model(self, message: MessageThinking, model: LLMRequest, thinking_id: str):
         sender_name = ""
 
         info_catcher = info_catcher_manager.get_info_catcher(thinking_id)
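The diff above renames LLM_request to LLMRequest at every call site in the reasoning generator. Below is a minimal sketch of one updated call site, assuming the constructor keywords shown in the diff (model, temperature, max_tokens, request_type) are otherwise unchanged; the relative import and the self.* attribute are meant to be read in the context of ResponseGenerator.__init__ inside the package, not run standalone.

# Before this commit:
#   from ...models.utils_model import LLM_request
#   self.model_normal = LLM_request(...)
# After this commit only the class name changes; the keyword arguments stay the same.
from ...models.utils_model import LLMRequest

self.model_normal = LLMRequest(
    model=global_config.llm_normal,
    temperature=global_config.llm_normal["temp"],
    max_tokens=256,
    request_type="response_reasoning",
)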