Merge pull request #530 from wangw571/main-fix

Fixes for the two visualization features and the bot's reasoning process
SengokuCola
2025-03-22 21:13:49 +08:00
committed by GitHub
4 changed files with 14 additions and 4 deletions

View File

@@ -1,5 +1,5 @@
-call conda activate niuniu
-cd src\gui
-start /b python reasoning_gui.py
+@REM call conda activate niuniu
+cd ../src\gui
+start /b ../../venv/scripts/python.exe reasoning_gui.py
 exit
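The launcher no longer depends on an activated conda environment; it starts the GUI with the repository's bundled venv interpreter, addressed relative to the working directory that `cd ../src\gui` establishes. A quick sketch of how that relative path resolves, assuming an illustrative layout with `venv/` and `src/gui/` side by side under the repository root:

import os

# After "cd ../src\gui" the working directory is <repo>/src/gui (illustrative).
cwd = "/repo/src/gui"

# "start /b ../../venv/scripts/python.exe reasoning_gui.py" resolves against it:
interpreter = os.path.join(cwd, "../../venv/scripts/python.exe")
print(os.path.normpath(interpreter))    # /repo/venv/scripts/python.exe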

View File

@@ -6,6 +6,8 @@ import time
 from datetime import datetime
 from typing import Dict, List
 from typing import Optional
+sys.path.insert(0, sys.path[0]+"/../")
+sys.path.insert(0, sys.path[0]+"/../")
 from src.common.logger import get_module_logger
 import customtkinter as ctk
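The duplicated `sys.path.insert(0, sys.path[0]+"/../")` lines are not a merge artifact: each call re-reads `sys.path[0]`, which the previous insert just replaced, so every repetition climbs one directory level higher (the next file repeats it five times for a deeper starting point). A minimal sketch of the mechanism, using an illustrative list in place of the real `sys.path`:

import os

# Stand-in for sys.path; the real code mutates sys.path directly.
path = ["/repo/src/gui"]            # sys.path[0] starts at the script's directory

path.insert(0, path[0] + "/../")    # prepends /repo/src/gui/../     (= /repo/src)
path.insert(0, path[0] + "/../")    # prepends /repo/src/gui/../../  (= /repo)

print(os.path.normpath(path[0]))    # /repo -- the repository root, so
                                    # "from src.common.logger import ..." resolves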

View File

@@ -10,6 +10,12 @@ from pathlib import Path
 import matplotlib.pyplot as plt
 import networkx as nx
 from dotenv import load_dotenv
+sys.path.insert(0, sys.path[0]+"/../")
+sys.path.insert(0, sys.path[0]+"/../")
+sys.path.insert(0, sys.path[0]+"/../")
+sys.path.insert(0, sys.path[0]+"/../")
+sys.path.insert(0, sys.path[0]+"/../")
+from src.common.logger import get_module_logger
 import jieba
 # from chat.config import global_config

View File

@@ -274,6 +274,7 @@ class LLM_request:
                     raise RuntimeError(f"Request rejected: {error_code_mapping.get(response.status)}")
                 response.raise_for_status()
+                reasoning_content = ""
                 # Convert the streaming output into a non-streaming result
                 if stream_mode:
@@ -303,6 +304,8 @@
                             accumulated_content += delta_content
                             # Check whether the streamed text has finished
                             finish_reason = chunk["choices"][0].get("finish_reason")
+                            if delta.get("reasoning_content", None):
+                                reasoning_content += delta["reasoning_content"]
                             if finish_reason == "stop":
                                 chunk_usage = chunk.get("usage", None)
                                 if chunk_usage:
@@ -314,7 +317,6 @@
                     except Exception as e:
                         logger.exception(f"Error parsing streaming output: {str(e)}")
                 content = accumulated_content
-                reasoning_content = ""
                 think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
                 if think_match:
                     reasoning_content = think_match.group(1).strip()
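Taken together, the `LLM_request` hunks fix the bot's reasoning output for streaming responses: `reasoning_content` is now initialized before the streaming branch and accumulated from each delta's `reasoning_content` field, instead of being reset to an empty string after the loop, which discarded everything the stream had produced. A minimal sketch of the corrected flow, assuming OpenAI-style chunks; the function name and sample data are illustrative:

import re

def collect_stream(chunks):
    """Fold OpenAI-style stream chunks into (content, reasoning_content)."""
    reasoning_content = ""      # initialized before the loop, not reset after it
    accumulated_content = ""
    for chunk in chunks:
        delta = chunk["choices"][0].get("delta", {})
        accumulated_content += delta.get("content") or ""
        # Reasoning models stream their thoughts in a separate delta field.
        if delta.get("reasoning_content"):
            reasoning_content += delta["reasoning_content"]
    content = accumulated_content
    # Fallback for models that inline their reasoning in <think> tags instead.
    think_match = re.search(r"<think>(.*?)</think>", content, re.DOTALL)
    if think_match:
        reasoning_content = think_match.group(1).strip()
    return content, reasoning_content

# Illustrative usage:
chunks = [
    {"choices": [{"delta": {"reasoning_content": "think... "}}]},
    {"choices": [{"delta": {"content": "Hello!"}}]},
]
print(collect_stream(chunks))   # ('Hello!', 'think... ')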