Fix assorted small issues

春河晴
2025-04-16 17:37:28 +09:00
parent a0b1b1f8d8
commit dc2cf843e5
36 changed files with 114 additions and 107 deletions
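This commit renames the `LLM_request` class exported by src/plugins/models/utils_model to the PEP 8 CapWords form `LLMRequest` and updates the imports and call sites across the repository; the three hunks below are representative of the 36 changed files. For a rename of this breadth, a transitional alias in the defining module can keep not-yet-migrated imports working while warning their owners. A minimal sketch of that pattern, assuming the class lives in utils_model.py (the alias is illustrative, not part of this commit):

# utils_model.py — hypothetical transitional alias, NOT part of this commit
import warnings

class LLMRequest:
    """LLM request wrapper, renamed from LLM_request."""
    ...

def __getattr__(name):
    # Module-level __getattr__ (PEP 562, Python 3.7+): keeps the old name
    # importable for one release while nudging callers to migrate.
    if name == "LLM_request":
        warnings.warn("LLM_request was renamed to LLMRequest", DeprecationWarning, stacklevel=2)
        return LLMRequest
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")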

View File

@@ -1,7 +1,7 @@
 from .sub_heartflow import SubHeartflow
 from .observation import ChattingObservation
 from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
 from src.plugins.config.config import global_config
 from src.plugins.schedule.schedule_generator import bot_schedule
 from src.plugins.utils.prompt_builder import Prompt, global_prompt_manager
@@ -60,7 +60,7 @@ class Heartflow:
         self.current_mind = "你什么也没想"
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
-        self.llm_model = LLM_request(
+        self.llm_model = LLMRequest(
             model=global_config.llm_heartflow, temperature=0.6, max_tokens=1000, request_type="heart_flow"
         )

View File

@@ -1,7 +1,7 @@
 # Defines the information coming from the outside world
 # The outside world can be a particular chat, chats on different platforms, or any other medium
 from datetime import datetime
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
 from src.plugins.config.config import global_config
 from src.common.database import db
 from src.common.logger import get_module_logger
@@ -40,7 +40,7 @@ class ChattingObservation(Observation):
         self.updating_old = False
-        self.llm_summary = LLM_request(
+        self.llm_summary = LLMRequest(
             model=global_config.llm_observation, temperature=0.7, max_tokens=300, request_type="chat_observation"
         )

View File

@@ -1,7 +1,7 @@
 from .observation import Observation, ChattingObservation
 import asyncio
 from src.plugins.moods.moods import MoodManager
-from src.plugins.models.utils_model import LLM_request
+from src.plugins.models.utils_model import LLMRequest
 from src.plugins.config.config import global_config
 import time
 from src.plugins.chat.message import UserInfo
@@ -79,7 +79,7 @@ class SubHeartflow:
         self.current_mind = ""
         self.past_mind = []
         self.current_state: CurrentState = CurrentState()
-        self.llm_model = LLM_request(
+        self.llm_model = LLMRequest(
             model=global_config.llm_sub_heartflow,
             temperature=global_config.llm_sub_heartflow["temp"],
             max_tokens=600,
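Each hunk makes the same two-line substitution (one import, one constructor call), and the other changed files presumably follow the same pattern. After a tree-wide rename like this it is worth scanning for stragglers, since a missed call site only fails at runtime when the module is imported. A minimal sketch of such a check, assuming the code lives under src/ (hypothetical helper, not part of the commit):

# check_rename.py — hypothetical helper, not part of this commit
from pathlib import Path

# Walk every Python file under src/ and report lines that still mention
# the old class name (a compatibility alias, if one were added, would be
# flagged here too and can simply be skipped by eye).
for path in Path("src").rglob("*.py"):
    for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1):
        if "LLM_request" in line:
            print(f"{path}:{lineno}: still references LLM_request")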