From 5d31cc8bbe52b121f4bcab0bf75a820969933afb Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Fri, 9 May 2025 21:20:31 +0800
Subject: [PATCH 01/20] feat: update the Docker image tags
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 54 +++++++++++++++++-------------
docker-compose.yml | 4 +--
2 files changed, 33 insertions(+), 25 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 605d838ce..36c7604fa 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -6,10 +6,9 @@ on:
- main
- classical
- dev
- - new_knowledge
tags:
- - 'v*'
- workflow_dispatch:
+ - "v*.*.*"
+ - "v*"
jobs:
build-and-push:
@@ -20,6 +19,11 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
- name: Clone maim_message
run: git clone https://github.com/MaiM-with-u/maim_message maim_message
@@ -29,6 +33,8 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
+ with:
+ buildkitd-flags: --debug
- name: Login to Docker Hub
uses: docker/login-action@v3
@@ -36,20 +42,18 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Determine Image Tags
- id: tags
- run: |
- if [[ "${{ github.ref }}" == refs/tags/* ]]; then
- echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:${{ github.ref_name }},${{ secrets.DOCKERHUB_USERNAME }}/maimbot:latest" >> $GITHUB_OUTPUT
- elif [ "${{ github.ref }}" == "refs/heads/main" ]; then
- echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:main,${{ secrets.DOCKERHUB_USERNAME }}/maimbot:main-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
- elif [ "${{ github.ref }}" == "refs/heads/classical" ]; then
- echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:classical,${{ secrets.DOCKERHUB_USERNAME }}/maimbot:classical-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
- elif [ "${{ github.ref }}" == "refs/heads/dev" ]; then
- echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:dev,${{ secrets.DOCKERHUB_USERNAME }}/maimbot:dev-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
- elif [ "${{ github.ref }}" == "refs/heads/new_knowledge" ]; then
- echo "tags=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:knowledge,${{ secrets.DOCKERHUB_USERNAME }}/maimbot:knowledge-$(date -u +'%Y%m%d%H%M%S')" >> $GITHUB_OUTPUT
- fi
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=sha
- name: Build and Push Docker Image
uses: docker/build-push-action@v5
@@ -57,10 +61,14 @@ jobs:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
- tags: ${{ steps.tags.outputs.tags }}
+ tags: ${{ steps.meta.outputs.tags }}
push: true
- cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:buildcache
- cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maimbot:buildcache,mode=max
- labels: |
- org.opencontainers.image.created=${{ steps.tags.outputs.date_tag }}
- org.opencontainers.image.revision=${{ github.sha }}
\ No newline at end of file
+ cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:buildcache
+ cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:buildcache,mode=max
+ labels: ${{ steps.meta.outputs.labels }}
+ provenance: true
+ sbom: true
+ build-args: |
+ BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ VCS_REF=${{ github.sha }}
+ outputs: type=image,push=true
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
index 000d00c35..363fafc2f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -16,8 +16,8 @@ services:
- maim_bot
core:
container_name: maim-bot-core
- image: sengokucola/maimbot:main
- # image: infinitycat/maimbot:main
+ image: sengokucola/maibot:main
+ # image: infinitycat/maibot:main
environment:
- TZ=Asia/Shanghai
# - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # 同意EULA
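
The deleted "Determine Image Tags" step mapped the triggering Git ref to Docker Hub tags by hand; docker/metadata-action@v5 now derives an equivalent (and richer) tag set from the same ref. A rough Python sketch of the mapping the removed shell encoded — illustrative only; the maimbot image name and the timestamp suffix come from the deleted step, not from metadata-action:

    from datetime import datetime, timezone

    def derive_tags(ref: str, user: str, image: str = "maimbot") -> list[str]:
        """Approximate the removed shell step: map a GitHub ref to Docker tags."""
        stamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")
        if ref.startswith("refs/tags/"):
            version = ref.removeprefix("refs/tags/")
            return [f"{user}/{image}:{version}", f"{user}/{image}:latest"]
        if ref.startswith("refs/heads/"):
            branch = ref.removeprefix("refs/heads/")
            # the new_knowledge branch was published under the shorter "knowledge" tag
            name = "knowledge" if branch == "new_knowledge" else branch
            return [f"{user}/{image}:{name}", f"{user}/{image}:{name}-{stamp}"]
        return []

    # derive_tags("refs/heads/dev", "sengokucola")
    # -> ['sengokucola/maimbot:dev', 'sengokucola/maimbot:dev-20250509132031']
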
From 3323c8dc498984cb938fc16f531dff05e8c5bc21 Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Fri, 9 May 2025 21:42:00 +0800
Subject: [PATCH 02/20] feat: update the docker-compose image tag
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
docker-compose.yml | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 363fafc2f..2392f707f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -16,8 +16,11 @@ services:
- maim_bot
core:
container_name: maim-bot-core
- image: sengokucola/maibot:main
- # image: infinitycat/maibot:main
+ image: sengokucola/maibot:latest
+ # image: infinitycat/maibot:latest
+ # dev
+ # image: sengokucola/maibot:dev
+ # image: infinitycat/maibot:dev
environment:
- TZ=Asia/Shanghai
# - EULA_AGREE=35362b6ea30f12891d46ef545122e84a # 同意EULA
From 835efd5daae12ef268bb016d9e12f8e9a184fc9c Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 01:41:56 +0800
Subject: [PATCH 03/20] feat: refactor the Docker image build workflow, adding multi-platform support and digest upload
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 129 +++++++++++++++++++++++------
1 file changed, 102 insertions(+), 27 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 36c7604fa..3fce193b2 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -10,20 +10,58 @@ on:
- "v*.*.*"
- "v*"
+env:
+ REGISTRY_IMAGE: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
+
jobs:
- build-and-push:
+ prepare:
runs-on: ubuntu-latest
- env:
- DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USERNAME }}
- DATE_TAG: $(date -u +'%Y-%m-%dT%H-%M-%S')
+ outputs:
+ tags: ${{ steps.meta.outputs.tags }}
+ labels: ${{ steps.meta.outputs.labels }}
+ bake-file: ${{ steps.meta.outputs.bake-file }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
+
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.REGISTRY_IMAGE }}
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=sha
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
+ build:
+ runs-on: ubuntu-latest
+ needs: prepare
+ strategy:
+ fail-fast: false
+ matrix:
+ platform:
+ - linux/amd64
+ - linux/arm64
+ - linux/arm/v7
+ - linux/arm/v6
+ - linux/386
+ - linux/loong64
+ steps:
+ - name: Prepare
+ run: |
+ platform=${{ matrix.platform }}
+ echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
+
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
- name: Clone maim_message
run: git clone https://github.com/MaiM-with-u/maim_message maim_message
@@ -31,6 +69,9 @@ jobs:
- name: Clone lpmm
run: git clone https://github.com/MaiM-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
@@ -42,33 +83,67 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Docker meta
- id: meta
- uses: docker/metadata-action@v5
- with:
- images: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
- tags: |
- type=ref,event=branch
- type=ref,event=tag
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}.{{minor}}
- type=semver,pattern={{major}}
- type=sha
-
- - name: Build and Push Docker Image
+ - name: Build and push by digest
+ id: build
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile
- platforms: linux/amd64,linux/arm64
- tags: ${{ steps.meta.outputs.tags }}
- push: true
- cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:buildcache
- cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:buildcache,mode=max
- labels: ${{ steps.meta.outputs.labels }}
+ platforms: ${{ matrix.platform }}
+ labels: ${{ needs.prepare.outputs.labels }}
+ cache-from: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }}
+ cache-to: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }},mode=max
provenance: true
sbom: true
build-args: |
BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
VCS_REF=${{ github.sha }}
- outputs: type=image,push=true
\ No newline at end of file
+ outputs: type=image,push-by-digest=true,name-canonical=true,push=true
+
+ - name: Export digest
+ run: |
+ mkdir -p ${{ runner.temp }}/digests
+ digest="${{ steps.build.outputs.digest }}"
+ touch "${{ runner.temp }}/digests/${digest#sha256:}"
+
+ - name: Upload digest
+ uses: actions/upload-artifact@v4
+ with:
+ name: digests-${{ env.PLATFORM_PAIR }}
+ path: ${{ runner.temp }}/digests/*
+ if-no-files-found: error
+ retention-days: 1
+
+ merge:
+ runs-on: ubuntu-latest
+ needs:
+ - prepare
+ - build
+ steps:
+ - name: Download digests
+ uses: actions/download-artifact@v4
+ with:
+ path: ${{ runner.temp }}/digests
+ pattern: digests-*
+ merge-multiple: true
+
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Create manifest list and push
+ working-directory: ${{ runner.temp }}/digests
+ run: |
+ docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${{ needs.prepare.outputs.tags }}") \
+ $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
+
+ - name: Inspect image
+ run: |
+ tags_json='${{ needs.prepare.outputs.tags }}'
+ first_tag=$(echo $tags_json | jq -r '.tags[0]')
+ docker buildx imagetools inspect $first_tag
\ No newline at end of file
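
In this layout each matrix build pushes a single-platform image by digest only (push-by-digest=true with no tags), records the digest as an empty file, and the merge job stitches those digests into one multi-arch manifest with docker buildx imagetools create. A minimal Python sketch of the command that step assembles, assuming docker/metadata-action's tags output is its usual newline-separated list (the jq call in the step instead expects a JSON object with a .tags array, which later patches rework):

    import os

    def imagetools_create_cmd(registry_image: str, digests_dir: str, tags_output: str) -> list[str]:
        """Sketch of the merge step: tag the combined manifest with every final tag
        and reference each per-platform image by its pushed digest."""
        cmd = ["docker", "buildx", "imagetools", "create"]
        for tag in filter(None, (t.strip() for t in tags_output.splitlines())):
            cmd += ["-t", tag]
        # each file in digests_dir is named after one platform digest, without the sha256: prefix
        for name in sorted(os.listdir(digests_dir)):
            cmd.append(f"{registry_image}@sha256:{name}")
        return cmd

    # Pass the result to subprocess.run(cmd, check=True) to actually push the manifest list.
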
From de1c36f8e8ee64dd0ea2abaca8782ab99b56f211 Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 01:46:17 +0800
Subject: [PATCH 04/20] feat: add tag support to the Docker image build workflow for easier version management
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 1 +
1 file changed, 1 insertion(+)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 3fce193b2..097fdac37 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -91,6 +91,7 @@ jobs:
file: ./Dockerfile
platforms: ${{ matrix.platform }}
labels: ${{ needs.prepare.outputs.labels }}
+ tags: ${{ env.REGISTRY_IMAGE }}:${{ github.sha }}-${{ env.PLATFORM_PAIR }}
cache-from: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }}
cache-to: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }},mode=max
provenance: true
From 4fc33278c98e9cf0d97b5a238f9ed69198de8bbf Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 01:51:09 +0800
Subject: [PATCH 05/20] feat: streamline the Docker image build workflow, removing unnecessary platform support and updating the tag configuration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 097fdac37..fb3d4938d 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -48,10 +48,6 @@ jobs:
platform:
- linux/amd64
- linux/arm64
- - linux/arm/v7
- - linux/arm/v6
- - linux/386
- - linux/loong64
steps:
- name: Prepare
run: |
@@ -91,7 +87,7 @@ jobs:
file: ./Dockerfile
platforms: ${{ matrix.platform }}
labels: ${{ needs.prepare.outputs.labels }}
- tags: ${{ env.REGISTRY_IMAGE }}:${{ github.sha }}-${{ env.PLATFORM_PAIR }}
+ tags: ${{ env.REGISTRY_IMAGE }}
cache-from: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }}
cache-to: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }},mode=max
provenance: true
From f96fffe16eb8981f2c7f657effd1b503cdfadc0c Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 02:00:59 +0800
Subject: [PATCH 06/20] feat: update the Docker image build workflow to ensure a default tag is used and refine the tag-handling logic
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index fb3d4938d..7ea9d86e7 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -136,8 +136,16 @@ jobs:
- name: Create manifest list and push
working-directory: ${{ runner.temp }}/digests
run: |
- docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${{ needs.prepare.outputs.tags }}") \
- $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
+ # 确保至少有一个默认标签
+ TAGS="-t ${{ env.REGISTRY_IMAGE }}:latest"
+
+ # 如果 meta 输出的标签不为空,则使用它们
+ if [ -n "${{ needs.prepare.outputs.tags }}" ]; then
+ TAGS=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${{ needs.prepare.outputs.tags }}")
+ fi
+
+ echo "Using tags: ${TAGS}"
+ docker buildx imagetools create ${TAGS} $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
- name: Inspect image
run: |
From 5ad1993fee7d127b1af776e5816497169be24e14 Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 02:11:26 +0800
Subject: [PATCH 07/20] feat: improve the Docker image build workflow, strengthening the tag-handling logic to support a default tag
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 20 +++++++++++++++++---
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index 7ea9d86e7..a2e4cfc83 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -149,6 +149,20 @@ jobs:
- name: Inspect image
run: |
- tags_json='${{ needs.prepare.outputs.tags }}'
- first_tag=$(echo $tags_json | jq -r '.tags[0]')
- docker buildx imagetools inspect $first_tag
\ No newline at end of file
+ # 使用默认标签
+ DEFAULT_TAG="${{ env.REGISTRY_IMAGE }}:latest"
+
+ # 尝试从 prepare 输出中获取标签
+ if [ -n "${{ needs.prepare.outputs.tags }}" ]; then
+ TAGS_JSON='${{ needs.prepare.outputs.tags }}'
+ FIRST_TAG=$(echo $TAGS_JSON | jq -r '.tags[0]')
+ if [ -n "$FIRST_TAG" ] && [ "$FIRST_TAG" != "null" ]; then
+ echo "使用从 metadata 获取的标签: $FIRST_TAG"
+ docker buildx imagetools inspect $FIRST_TAG
+ exit 0
+ fi
+ fi
+
+ # 如果没有标签或提取失败,使用默认标签
+ echo "使用默认标签: $DEFAULT_TAG"
+ docker buildx imagetools inspect $DEFAULT_TAG
\ No newline at end of file
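
Patches 06 and 07 add the same defensive pattern in two places: fall back to the :latest tag whenever nothing usable can be extracted from the prepare job's output. A compact Python sketch of that selection logic, again assuming the tags output is newline-separated rather than the JSON shape the jq pipeline expects:

    def pick_inspect_tag(tags_output: str, registry_image: str) -> str:
        """First usable tag from the metadata output, else the :latest fallback."""
        for line in tags_output.splitlines():
            tag = line.strip()
            if tag and tag != "null":
                return tag
        return f"{registry_image}:latest"

    assert pick_inspect_tag("", "user/maibot") == "user/maibot:latest"
    assert pick_inspect_tag("user/maibot:dev\nuser/maibot:sha-abc1234", "user/maibot") == "user/maibot:dev"
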
From 606b89c99b233426d90d3289418eec0476e5e27f Mon Sep 17 00:00:00 2001
From: infinitycat
Date: Sat, 10 May 2025 02:37:46 +0800
Subject: [PATCH 08/20] feat: refactor the Docker image build workflow with separate AMD64 and ARM64 builds and improved multi-arch manifest creation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.github/workflows/docker-image.yml | 217 +++++++++++++++--------------
1 file changed, 109 insertions(+), 108 deletions(-)
diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml
index a2e4cfc83..ba56b0c24 100644
--- a/.github/workflows/docker-image.yml
+++ b/.github/workflows/docker-image.yml
@@ -10,50 +10,13 @@ on:
- "v*.*.*"
- "v*"
-env:
- REGISTRY_IMAGE: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
-
jobs:
- prepare:
+ build-amd64:
+ name: Build AMD64 Image
runs-on: ubuntu-latest
- outputs:
- tags: ${{ steps.meta.outputs.tags }}
- labels: ${{ steps.meta.outputs.labels }}
- bake-file: ${{ steps.meta.outputs.bake-file }}
+ env:
+ DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USERNAME }}
steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Docker meta
- id: meta
- uses: docker/metadata-action@v5
- with:
- images: ${{ env.REGISTRY_IMAGE }}
- tags: |
- type=ref,event=branch
- type=ref,event=tag
- type=semver,pattern={{version}}
- type=semver,pattern={{major}}.{{minor}}
- type=semver,pattern={{major}}
- type=sha
-
- build:
- runs-on: ubuntu-latest
- needs: prepare
- strategy:
- fail-fast: false
- matrix:
- platform:
- - linux/amd64
- - linux/arm64
- steps:
- - name: Prepare
- run: |
- platform=${{ matrix.platform }}
- echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
-
- name: Checkout code
uses: actions/checkout@v4
with:
@@ -65,9 +28,6 @@ jobs:
- name: Clone lpmm
run: git clone https://github.com/MaiM-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
-
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
@@ -79,50 +39,61 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Build and push by digest
- id: build
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=sha
+
+ - name: Build and Push AMD64 Docker Image
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile
- platforms: ${{ matrix.platform }}
- labels: ${{ needs.prepare.outputs.labels }}
- tags: ${{ env.REGISTRY_IMAGE }}
- cache-from: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }}
- cache-to: type=registry,ref=${{ env.REGISTRY_IMAGE }}:buildcache-${{ env.PLATFORM_PAIR }},mode=max
+ platforms: linux/amd64
+ tags: ${{ secrets.DOCKERHUB_USERNAME }}/maibot:amd64-${{ github.sha }}
+ push: true
+ cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:amd64-buildcache
+ cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:amd64-buildcache,mode=max
+ labels: ${{ steps.meta.outputs.labels }}
provenance: true
sbom: true
build-args: |
BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
VCS_REF=${{ github.sha }}
- outputs: type=image,push-by-digest=true,name-canonical=true,push=true
+ outputs: type=image,push=true
- - name: Export digest
- run: |
- mkdir -p ${{ runner.temp }}/digests
- digest="${{ steps.build.outputs.digest }}"
- touch "${{ runner.temp }}/digests/${digest#sha256:}"
-
- - name: Upload digest
- uses: actions/upload-artifact@v4
- with:
- name: digests-${{ env.PLATFORM_PAIR }}
- path: ${{ runner.temp }}/digests/*
- if-no-files-found: error
- retention-days: 1
-
- merge:
+ build-arm64:
+ name: Build ARM64 Image
runs-on: ubuntu-latest
- needs:
- - prepare
- - build
+ env:
+ DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USERNAME }}
steps:
- - name: Download digests
- uses: actions/download-artifact@v4
+ - name: Checkout code
+ uses: actions/checkout@v4
with:
- path: ${{ runner.temp }}/digests
- pattern: digests-*
- merge-multiple: true
+ fetch-depth: 0
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Clone maim_message
+ run: git clone https://github.com/MaiM-with-u/maim_message maim_message
+
+ - name: Clone lpmm
+ run: git clone https://github.com/MaiM-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+ with:
+ buildkitd-flags: --debug
- name: Login to Docker Hub
uses: docker/login-action@v3
@@ -130,39 +101,69 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v3
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=sha
- - name: Create manifest list and push
- working-directory: ${{ runner.temp }}/digests
- run: |
- # 确保至少有一个默认标签
- TAGS="-t ${{ env.REGISTRY_IMAGE }}:latest"
-
- # 如果 meta 输出的标签不为空,则使用它们
- if [ -n "${{ needs.prepare.outputs.tags }}" ]; then
- TAGS=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${{ needs.prepare.outputs.tags }}")
- fi
-
- echo "Using tags: ${TAGS}"
- docker buildx imagetools create ${TAGS} $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
+ - name: Build and Push ARM64 Docker Image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: ./Dockerfile
+ platforms: linux/arm64
+ tags: ${{ secrets.DOCKERHUB_USERNAME }}/maibot:arm64-${{ github.sha }}
+ push: true
+ cache-from: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:arm64-buildcache
+ cache-to: type=registry,ref=${{ secrets.DOCKERHUB_USERNAME }}/maibot:arm64-buildcache,mode=max
+ labels: ${{ steps.meta.outputs.labels }}
+ provenance: true
+ sbom: true
+ build-args: |
+ BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+ VCS_REF=${{ github.sha }}
+ outputs: type=image,push=true
- - name: Inspect image
+ create-manifest:
+ name: Create Multi-Arch Manifest
+ runs-on: ubuntu-latest
+ needs:
+ - build-amd64
+ - build-arm64
+ steps:
+ - name: Login to Docker Hub
+ uses: docker/login-action@v3
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Docker meta
+ id: meta
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ secrets.DOCKERHUB_USERNAME }}/maibot
+ tags: |
+ type=ref,event=branch
+ type=ref,event=tag
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=semver,pattern={{major}}
+ type=sha
+
+ - name: Create and Push Manifest
run: |
- # 使用默认标签
- DEFAULT_TAG="${{ env.REGISTRY_IMAGE }}:latest"
-
- # 尝试从 prepare 输出中获取标签
- if [ -n "${{ needs.prepare.outputs.tags }}" ]; then
- TAGS_JSON='${{ needs.prepare.outputs.tags }}'
- FIRST_TAG=$(echo $TAGS_JSON | jq -r '.tags[0]')
- if [ -n "$FIRST_TAG" ] && [ "$FIRST_TAG" != "null" ]; then
- echo "使用从 metadata 获取的标签: $FIRST_TAG"
- docker buildx imagetools inspect $FIRST_TAG
- exit 0
- fi
- fi
-
- # 如果没有标签或提取失败,使用默认标签
- echo "使用默认标签: $DEFAULT_TAG"
- docker buildx imagetools inspect $DEFAULT_TAG
\ No newline at end of file
+ # 为每个标签创建多架构镜像
+ for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr '\n' ' '); do
+ echo "Creating manifest for $tag"
+ docker buildx imagetools create -t $tag \
+ ${{ secrets.DOCKERHUB_USERNAME }}/maibot:amd64-${{ github.sha }} \
+ ${{ secrets.DOCKERHUB_USERNAME }}/maibot:arm64-${{ github.sha }}
+ done
\ No newline at end of file
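
With the digest plumbing removed, the create-manifest job instead combines the two architecture-specific tags (amd64-<sha> and arm64-<sha>) pushed by the build jobs into one manifest list per final tag. A Python sketch of the commands that loop issues, mirroring the tr '\n' ' ' word-splitting in the shell step:

    def manifest_commands(user: str, sha: str, tags_output: str) -> list[list[str]]:
        """One `docker buildx imagetools create` invocation per final tag."""
        sources = [f"{user}/maibot:amd64-{sha}", f"{user}/maibot:arm64-{sha}"]
        return [
            ["docker", "buildx", "imagetools", "create", "-t", tag, *sources]
            for tag in tags_output.split()  # tags_output is the newline-separated metadata output
        ]
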
From b142c5b4e59a4238bfa2c7b75d9d4813e5e825bd Mon Sep 17 00:00:00 2001
From: Dreamwxz
Date: Sat, 10 May 2025 20:53:06 +0800
Subject: [PATCH 09/20] Update README and some names
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
EULA.md | 4 ++--
PRIVACY.md | 2 +-
README.md | 6 +++---
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/EULA.md b/EULA.md
index e21591a53..cf0fbda30 100644
--- a/EULA.md
+++ b/EULA.md
@@ -2,7 +2,7 @@
**版本:V1.0**
**更新日期:2025年5月9日**
**生效日期:2025年3月18日**
-**适用的MaiMBot版本号:所有版本**
+**适用的MaiBot版本号:所有版本**
**2025© MaiBot项目团队**
@@ -10,7 +10,7 @@
## 一、一般条款
-**1.1** MaiMBot项目(包括MaiMBot的源代码、可执行文件、文档,以及其它在本协议中所列出的文件)(以下简称“本项目”)是由开发者及贡献者(以下简称“项目团队”)共同维护,为用户提供自动回复功能的机器人代码项目。以下最终用户许可协议(EULA,以下简称“本协议”)是用户(以下简称“您”)与项目团队之间关于使用本项目所订立的合同条件。
+**1.1** MaiBot项目(包括MaiBot的源代码、可执行文件、文档,以及其它在本协议中所列出的文件)(以下简称“本项目”)是由开发者及贡献者(以下简称“项目团队”)共同维护,为用户提供自动回复功能的机器人代码项目。以下最终用户许可协议(EULA,以下简称“本协议”)是用户(以下简称“您”)与项目团队之间关于使用本项目所订立的合同条件。
**1.2** 在运行或使用本项目之前,您**必须阅读并同意本协议的所有条款**。未成年人或其它无/不完全民事行为能力责任人请**在监护人的陪同下**阅读并同意本协议。如果您不同意,则不得运行或使用本项目。在这种情况下,您应立即从您的设备上卸载或删除本项目及其所有副本。
diff --git a/PRIVACY.md b/PRIVACY.md
index 4e34a2c3c..33bc131d6 100644
--- a/PRIVACY.md
+++ b/PRIVACY.md
@@ -6,7 +6,7 @@
**2025© MaiBot项目团队**
-MaiMBot项目团队(以下简称项目团队)**尊重并保护**用户(以下简称您)的隐私。若您选择使用MaiMBot项目(以下简称本项目),则您需同意本项目按照以下隐私条款处理您的输入和输出内容:
+MaiBot项目团队(以下简称项目团队)**尊重并保护**用户(以下简称您)的隐私。若您选择使用MaiBot项目(以下简称本项目),则您需同意本项目按照以下隐私条款处理您的输入和输出内容:
**1.1** 本项目**会**收集您的输入和输出内容并发送到第三方API,用于生成新的输出内容。因此您的输入和输出内容**会**同时受到本项目和第三方API的隐私政策约束。
diff --git a/README.md b/README.md
index f349e0ca7..f07e7d57f 100644
--- a/README.md
+++ b/README.md
@@ -36,7 +36,7 @@
-## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/manual/usage/mmc_q_a
+## 新版0.6.x部署前先阅读:https://docs.mai-mai.org/faq/maibot/backup_update.html
## 📝 项目简介
@@ -85,7 +85,7 @@
### ⚠️ 重要提示
-- 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/manual/usage/mmc_q_a)
+- 升级到v0.6.x版本前请务必阅读:[升级指南](https://docs.mai-mai.org/faq/maibot/backup_update.html)
- 本版本基于MaiCore重构,通过nonebot插件与QQ平台交互
- 项目处于活跃开发阶段,功能和API可能随时调整
@@ -94,7 +94,7 @@
- [二群](https://qm.qq.com/q/RzmCiRtHEW) 571780722
- [五群](https://qm.qq.com/q/JxvHZnxyec) 1022489779
- [三群](https://qm.qq.com/q/wlH5eT8OmQ) 1035228475【已满】
-- [四群](https://qm.qq.com/q/wlH5eT8OmQ) 729957033【已满】
+- [四群](https://qm.qq.com/q/wGePTl1UyY) 729957033【已满】
From 47b3e1af28b6760bd946b6aa9beb9c4c3f0f3812 Mon Sep 17 00:00:00 2001
From: 墨梓柒 <1787882683@qq.com>
Date: Sun, 11 May 2025 17:12:39 +0800
Subject: [PATCH 10/20] feat: add a basic system information endpoint covering OS, Python version, CPU, and memory usage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/basic_info_api.py | 152 ++++++++++++++++++++++++++++++++
src/api/config_api.py | 176 ++++++++++++++++++++++++++++++--------
src/api/main.py | 13 +++
src/common/server.py | 16 ++++
4 files changed, 322 insertions(+), 35 deletions(-)
create mode 100644 src/api/basic_info_api.py
diff --git a/src/api/basic_info_api.py b/src/api/basic_info_api.py
new file mode 100644
index 000000000..73de18f80
--- /dev/null
+++ b/src/api/basic_info_api.py
@@ -0,0 +1,152 @@
+import platform
+import psutil
+import sys
+import os
+
+def get_system_info():
+ """获取操作系统信息"""
+ return {
+ "system": platform.system(),
+ "release": platform.release(),
+ "version": platform.version(),
+ "machine": platform.machine(),
+ "processor": platform.processor(),
+ }
+
+def get_python_version():
+ """获取 Python 版本信息"""
+ return sys.version
+
+def get_cpu_usage():
+ """获取系统总CPU使用率"""
+ return psutil.cpu_percent(interval=1)
+
+def get_process_cpu_usage():
+ """获取当前进程CPU使用率"""
+ process = psutil.Process(os.getpid())
+ return process.cpu_percent(interval=1)
+
+def get_memory_usage():
+ """获取系统内存使用情况 (单位 MB)"""
+ mem = psutil.virtual_memory()
+ bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) #noqa
+ return {
+ "total_mb": bytes_to_mb(mem.total),
+ "available_mb": bytes_to_mb(mem.available),
+ "percent": mem.percent,
+ "used_mb": bytes_to_mb(mem.used),
+ "free_mb": bytes_to_mb(mem.free),
+ }
+
+def get_process_memory_usage():
+ """获取当前进程内存使用情况 (单位 MB)"""
+ process = psutil.Process(os.getpid())
+ mem_info = process.memory_info()
+ bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) #noqa
+ return {
+ "rss_mb": bytes_to_mb(mem_info.rss), # Resident Set Size: 实际使用物理内存
+ "vms_mb": bytes_to_mb(mem_info.vms), # Virtual Memory Size: 虚拟内存大小
+ "percent": process.memory_percent() # 进程内存使用百分比
+ }
+
+def get_disk_usage(path="/"):
+ """获取指定路径磁盘使用情况 (单位 GB)"""
+ disk = psutil.disk_usage(path)
+ bytes_to_gb = lambda x: round(x / (1024 * 1024 * 1024), 2) #noqa
+ return {
+ "total_gb": bytes_to_gb(disk.total),
+ "used_gb": bytes_to_gb(disk.used),
+ "free_gb": bytes_to_gb(disk.free),
+ "percent": disk.percent,
+ }
+
+def get_all_basic_info():
+ """获取所有基本信息并封装返回"""
+ # 对于进程CPU使用率,需要先初始化
+ process = psutil.Process(os.getpid())
+ process.cpu_percent(interval=None) # 初始化调用
+ process_cpu = process.cpu_percent(interval=0.1) # 短暂间隔获取
+
+ return {
+ "system_info": get_system_info(),
+ "python_version": get_python_version(),
+ "cpu_usage_percent": get_cpu_usage(),
+ "process_cpu_usage_percent": process_cpu,
+ "memory_usage": get_memory_usage(),
+ "process_memory_usage": get_process_memory_usage(),
+ "disk_usage_root": get_disk_usage("/"),
+ }
+
+def get_all_basic_info_string() -> str:
+ """获取所有基本信息并以带解释的字符串形式返回"""
+ info = get_all_basic_info()
+
+ sys_info = info["system_info"]
+ mem_usage = info["memory_usage"]
+ proc_mem_usage = info["process_memory_usage"]
+ disk_usage = info["disk_usage_root"]
+
+ # 对进程内存使用百分比进行格式化,保留两位小数
+ proc_mem_percent = round(proc_mem_usage['percent'], 2)
+
+ output_string = f"""[系统信息]
+ - 操作系统: {sys_info['system']} (例如: Windows, Linux)
+ - 发行版本: {sys_info['release']} (例如: 11, Ubuntu 20.04)
+ - 详细版本: {sys_info['version']}
+ - 硬件架构: {sys_info['machine']} (例如: AMD64)
+ - 处理器信息: {sys_info['processor']}
+
+[Python 环境]
+ - Python 版本: {info['python_version']}
+
+[CPU 状态]
+ - 系统总 CPU 使用率: {info['cpu_usage_percent']}%
+ - 当前进程 CPU 使用率: {info['process_cpu_usage_percent']}%
+
+[系统内存使用情况]
+ - 总物理内存: {mem_usage['total_mb']} MB
+ - 可用物理内存: {mem_usage['available_mb']} MB
+ - 物理内存使用率: {mem_usage['percent']}%
+ - 已用物理内存: {mem_usage['used_mb']} MB
+ - 空闲物理内存: {mem_usage['free_mb']} MB
+
+[当前进程内存使用情况]
+ - 实际使用物理内存 (RSS): {proc_mem_usage['rss_mb']} MB
+ - 占用虚拟内存 (VMS): {proc_mem_usage['vms_mb']} MB
+ - 进程内存使用率: {proc_mem_percent}%
+
+[磁盘使用情况 (根目录)]
+ - 总空间: {disk_usage['total_gb']} GB
+ - 已用空间: {disk_usage['used_gb']} GB
+ - 可用空间: {disk_usage['free_gb']} GB
+ - 磁盘使用率: {disk_usage['percent']}%
+"""
+ return output_string
+
+if __name__ == '__main__':
+ print(f"System Info: {get_system_info()}")
+ print(f"Python Version: {get_python_version()}")
+ print(f"CPU Usage: {get_cpu_usage()}%")
+ # 第一次调用 process.cpu_percent() 会返回0.0或一个无意义的值,需要间隔一段时间再调用
+ # 或者在初始化Process对象后,先调用一次cpu_percent(interval=None),然后再调用cpu_percent(interval=1)
+ current_process = psutil.Process(os.getpid())
+ current_process.cpu_percent(interval=None) # 初始化
+ print(f"Process CPU Usage: {current_process.cpu_percent(interval=1)}%") # 实际获取
+
+ memory_usage_info = get_memory_usage()
+ print(f"Memory Usage: Total={memory_usage_info['total_mb']}MB, Used={memory_usage_info['used_mb']}MB, Percent={memory_usage_info['percent']}%")
+
+ process_memory_info = get_process_memory_usage()
+ print(f"Process Memory Usage: RSS={process_memory_info['rss_mb']}MB, VMS={process_memory_info['vms_mb']}MB, Percent={process_memory_info['percent']}%")
+
+ disk_usage_info = get_disk_usage('/')
+ print(f"Disk Usage (Root): Total={disk_usage_info['total_gb']}GB, Used={disk_usage_info['used_gb']}GB, Percent={disk_usage_info['percent']}%")
+
+ print("\n--- All Basic Info (JSON) ---")
+ all_info = get_all_basic_info()
+ import json
+ print(json.dumps(all_info, indent=4, ensure_ascii=False))
+
+ print("\n--- All Basic Info (String with Explanations) ---")
+ info_string = get_all_basic_info_string()
+ print(info_string)
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 581c05a01..57780ad2a 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -10,7 +10,7 @@ ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
class APIBotConfig:
"""机器人配置类"""
- INNER_VERSION: Version # 配置文件内部版本号
+ INNER_VERSION: str # 配置文件内部版本号(toml为字符串)
MAI_VERSION: str # 硬编码的版本信息
# bot
@@ -34,28 +34,28 @@ class APIBotConfig:
appearance: str # 外貌特征描述
# schedule
- ENABLE_SCHEDULE_GEN: bool # 是否启用日程生成
- ENABLE_SCHEDULE_INTERACTION: bool # 是否启用日程交互
- PROMPT_SCHEDULE_GEN: str # 日程生成提示词
- SCHEDULE_DOING_UPDATE_INTERVAL: int # 日程进行中更新间隔
- SCHEDULE_TEMPERATURE: float # 日程生成温度
- TIME_ZONE: str # 时区
+ enable_schedule_gen: bool # 是否启用日程表
+ enable_schedule_interaction: bool # 日程表是否影响回复模式
+ prompt_schedule_gen: str # 日程生成提示词
+ schedule_doing_update_interval: int # 日程表更新间隔(秒)
+ schedule_temperature: float # 日程表温度
+ time_zone: str # 时区
# platforms
platforms: Dict[str, str] # 平台信息
# chat
- allow_focus_mode: bool # 是否允许专注模式
- base_normal_chat_num: int # 基础普通聊天次数
- base_focused_chat_num: int # 基础专注聊天次数
- observation_context_size: int # 观察上下文大小
+ allow_focus_mode: bool # 是否允许专注聊天状态
+ base_normal_chat_num: int # 最多允许多少个群进行普通聊天
+ base_focused_chat_num: int # 最多允许多少个群进行专注聊天
+ observation_context_size: int # 观察到的最长上下文大小
message_buffer: bool # 是否启用消息缓冲
ban_words: List[str] # 禁止词列表
ban_msgs_regex: List[str] # 禁止消息的正则表达式列表
# normal_chat
- MODEL_R1_PROBABILITY: float # 模型推理概率
- MODEL_V3_PROBABILITY: float # 模型普通概率
+ model_reasoning_probability: float # 推理模型概率
+ model_normal_probability: float # 普通模型概率
emoji_chance: float # 表情符号出现概率
thinking_timeout: int # 思考超时时间
willing_mode: str # 意愿模式
@@ -63,8 +63,8 @@ class APIBotConfig:
response_interested_rate_amplifier: float # 回复兴趣率放大器
down_frequency_rate: float # 降低频率率
emoji_response_penalty: float # 表情回复惩罚
- mentioned_bot_inevitable_reply: bool # 提到机器人时是否必定回复
- at_bot_inevitable_reply: bool # @机器人时是否必定回复
+ mentioned_bot_inevitable_reply: bool # 提及 bot 必然回复
+ at_bot_inevitable_reply: bool # @bot 必然回复
# focus_chat
reply_trigger_threshold: float # 回复触发阈值
@@ -78,24 +78,25 @@ class APIBotConfig:
# emoji
max_emoji_num: int # 最大表情符号数量
max_reach_deletion: bool # 达到最大数量时是否删除
- EMOJI_CHECK_INTERVAL: int # 表情检查间隔
- EMOJI_REGISTER_INTERVAL: Optional[int] # 表情注册间隔(兼容性保留)
- EMOJI_SAVE: bool # 是否保存表情
- EMOJI_CHECK: bool # 是否检查表情
- EMOJI_CHECK_PROMPT: str # 表情检查提示词
+ check_interval: int # 检查表情包的时间间隔(分钟)
+ save_pic: bool # 是否保存图片
+ save_emoji: bool # 是否保存表情包
+ steal_emoji: bool # 是否偷取表情包
+ enable_check: bool # 是否启用表情包过滤
+ check_prompt: str # 表情包过滤要求
# memory
- build_memory_interval: int # 构建记忆间隔
- memory_build_distribution: List[float] # 记忆构建分布
- build_memory_sample_num: int # 构建记忆样本数量
- build_memory_sample_length: int # 构建记忆样本长度
+ build_memory_interval: int # 记忆构建间隔
+ build_memory_distribution: List[float] # 记忆构建分布
+ build_memory_sample_num: int # 采样数量
+ build_memory_sample_length: int # 采样长度
memory_compress_rate: float # 记忆压缩率
- forget_memory_interval: int # 忘记记忆间隔
- memory_forget_time: int # 记忆忘记时间
- memory_forget_percentage: float # 记忆忘记百分比
- consolidate_memory_interval: int # 巩固记忆间隔
- consolidation_similarity_threshold: float # 巩固相似度阈值
- consolidation_check_percentage: float # 巩固检查百分比
+ forget_memory_interval: int # 记忆遗忘间隔
+ memory_forget_time: int # 记忆遗忘时间(小时)
+ memory_forget_percentage: float # 记忆遗忘比例
+ consolidate_memory_interval: int # 记忆整合间隔
+ consolidation_similarity_threshold: float # 相似度阈值
+ consolidation_check_percentage: float # 检查节点比例
memory_ban_words: List[str] # 记忆禁止词列表
# mood
@@ -128,21 +129,19 @@ class APIBotConfig:
# experimental
enable_friend_chat: bool # 是否启用好友聊天
talk_allowed_private: List[int] # 允许私聊的QQ号列表
- enable_pfc_chatting: bool # 是否启用PFC聊天
+ pfc_chatting: bool # 是否启用PFC聊天
# 模型配置
llm_reasoning: Dict[str, Any] # 推理模型配置
llm_normal: Dict[str, Any] # 普通模型配置
llm_topic_judge: Dict[str, Any] # 主题判断模型配置
llm_summary: Dict[str, Any] # 总结模型配置
- llm_emotion_judge: Optional[Dict[str, Any]] # 情绪判断模型配置(兼容性保留)
- embedding: Dict[str, Any] # 嵌入模型配置
vlm: Dict[str, Any] # VLM模型配置
- moderation: Optional[Dict[str, Any]] # 审核模型配置(兼容性保留)
+ llm_heartflow: Dict[str, Any] # 心流模型配置
llm_observation: Dict[str, Any] # 观察模型配置
llm_sub_heartflow: Dict[str, Any] # 子心流模型配置
- llm_heartflow: Dict[str, Any] # 心流模型配置
llm_plan: Optional[Dict[str, Any]] # 计划模型配置
+ embedding: Dict[str, Any] # 嵌入模型配置
llm_PFC_action_planner: Optional[Dict[str, Any]] # PFC行动计划模型配置
llm_PFC_chat: Optional[Dict[str, Any]] # PFC聊天模型配置
llm_PFC_reply_checker: Optional[Dict[str, Any]] # PFC回复检查模型配置
@@ -150,6 +149,63 @@ class APIBotConfig:
api_urls: Optional[Dict[str, str]] # API地址配置
+ @staticmethod
+ def validate_config(config: dict):
+ """
+ 校验传入的 toml 配置字典是否合法。
+ :param config: toml库load后的配置字典
+ :raises: ValueError, KeyError, TypeError
+ """
+ # 检查主层级
+ required_sections = [
+ "inner", "bot", "groups", "personality", "identity", "schedule",
+ "platforms", "chat", "normal_chat", "focus_chat", "emoji", "memory",
+ "mood", "keywords_reaction", "chinese_typo", "response_splitter",
+ "remote", "experimental", "model"
+ ]
+ for section in required_sections:
+ if section not in config:
+ raise KeyError(f"缺少配置段: [{section}]")
+
+ # 检查部分关键字段
+ if "version" not in config["inner"]:
+ raise KeyError("缺少 inner.version 字段")
+ if not isinstance(config["inner"]["version"], str):
+ raise TypeError("inner.version 必须为字符串")
+
+ if "qq" not in config["bot"]:
+ raise KeyError("缺少 bot.qq 字段")
+ if not isinstance(config["bot"]["qq"], int):
+ raise TypeError("bot.qq 必须为整数")
+
+ if "personality_core" not in config["personality"]:
+ raise KeyError("缺少 personality.personality_core 字段")
+ if not isinstance(config["personality"]["personality_core"], str):
+ raise TypeError("personality.personality_core 必须为字符串")
+
+ if "identity_detail" not in config["identity"]:
+ raise KeyError("缺少 identity.identity_detail 字段")
+ if not isinstance(config["identity"]["identity_detail"], list):
+ raise TypeError("identity.identity_detail 必须为列表")
+
+ # 可继续添加更多字段的类型和值检查
+ # ...
+
+ # 检查模型配置
+ model_keys = [
+ "llm_reasoning", "llm_normal", "llm_topic_judge", "llm_summary",
+ "vlm", "llm_heartflow", "llm_observation", "llm_sub_heartflow",
+ "embedding"
+ ]
+ if "model" not in config:
+ raise KeyError("缺少 [model] 配置段")
+ for key in model_keys:
+ if key not in config["model"]:
+ raise KeyError(f"缺少 model.{key} 配置")
+
+ # 检查通过
+ return True
+
@strawberry.type
class APIEnvConfig:
@@ -182,6 +238,56 @@ class APIEnvConfig:
def get_env(self) -> str:
return "env"
+ @staticmethod
+ def validate_config(config: dict):
+ """
+ 校验环境变量配置字典是否合法。
+ :param config: 环境变量配置字典
+ :raises: KeyError, TypeError
+ """
+ required_fields = [
+ "HOST", "PORT", "PLUGINS", "MONGODB_HOST", "MONGODB_PORT", "DATABASE_NAME",
+ "CHAT_ANY_WHERE_BASE_URL", "SILICONFLOW_BASE_URL", "DEEP_SEEK_BASE_URL"
+ ]
+ for field in required_fields:
+ if field not in config:
+ raise KeyError(f"缺少环境变量配置字段: {field}")
+
+ if not isinstance(config["HOST"], str):
+ raise TypeError("HOST 必须为字符串")
+ if not isinstance(config["PORT"], int):
+ raise TypeError("PORT 必须为整数")
+ if not isinstance(config["PLUGINS"], list):
+ raise TypeError("PLUGINS 必须为列表")
+ if not isinstance(config["MONGODB_HOST"], str):
+ raise TypeError("MONGODB_HOST 必须为字符串")
+ if not isinstance(config["MONGODB_PORT"], int):
+ raise TypeError("MONGODB_PORT 必须为整数")
+ if not isinstance(config["DATABASE_NAME"], str):
+ raise TypeError("DATABASE_NAME 必须为字符串")
+ if not isinstance(config["CHAT_ANY_WHERE_BASE_URL"], str):
+ raise TypeError("CHAT_ANY_WHERE_BASE_URL 必须为字符串")
+ if not isinstance(config["SILICONFLOW_BASE_URL"], str):
+ raise TypeError("SILICONFLOW_BASE_URL 必须为字符串")
+ if not isinstance(config["DEEP_SEEK_BASE_URL"], str):
+ raise TypeError("DEEP_SEEK_BASE_URL 必须为字符串")
+
+ # 可选字段类型检查
+ optional_str_fields = [
+ "DEEP_SEEK_KEY", "CHAT_ANY_WHERE_KEY", "SILICONFLOW_KEY",
+ "CONSOLE_LOG_LEVEL", "FILE_LOG_LEVEL",
+ "DEFAULT_CONSOLE_LOG_LEVEL", "DEFAULT_FILE_LOG_LEVEL"
+ ]
+ for field in optional_str_fields:
+ if field in config and config[field] is not None and not isinstance(config[field], str):
+ raise TypeError(f"{field} 必须为字符串或None")
+
+ if "SIMPLE_OUTPUT" in config and config["SIMPLE_OUTPUT"] is not None and not isinstance(config["SIMPLE_OUTPUT"], bool):
+ raise TypeError("SIMPLE_OUTPUT 必须为布尔值或None")
+
+ # 检查通过
+ return True
+
print("当前路径:")
print(ROOT_PATH)
diff --git a/src/api/main.py b/src/api/main.py
index 4378ff1ed..48b03b586 100644
--- a/src/api/main.py
+++ b/src/api/main.py
@@ -16,6 +16,7 @@ from src.api.apiforgui import (
get_all_states,
)
from src.heart_flow.sub_heartflow import ChatState
+from src.api.basic_info_api import get_all_basic_info # 新增导入
# import uvicorn
# import os
@@ -97,6 +98,18 @@ async def get_all_states_api():
return {"status": "failed", "reason": "failed to get all states"}
+@router.get("/info")
+async def get_system_basic_info():
+ """获取系统基本信息"""
+ logger.info("请求系统基本信息")
+ try:
+ info = get_all_basic_info()
+ return {"status": "success", "data": info}
+ except Exception as e:
+ logger.error(f"获取系统基本信息失败: {e}")
+ return {"status": "failed", "reason": str(e)}
+
+
def start_api_server():
"""启动API服务器"""
global_server.register_router(router, prefix="/api/v1")
diff --git a/src/common/server.py b/src/common/server.py
index ff6106a7c..9f4a94592 100644
--- a/src/common/server.py
+++ b/src/common/server.py
@@ -1,4 +1,5 @@
from fastapi import FastAPI, APIRouter
+from fastapi.middleware.cors import CORSMiddleware # 新增导入
from typing import Optional
from uvicorn import Config, Server as UvicornServer
import os
@@ -15,6 +16,21 @@ class Server:
self._server: Optional[UvicornServer] = None
self.set_address(host, port)
+ # 配置 CORS
+ origins = [
+ "http://localhost:3000", # 允许的前端源
+ "http://127.0.0.1:3000",
+ # 在生产环境中,您应该添加实际的前端域名
+ ]
+
+ self.app.add_middleware(
+ CORSMiddleware,
+ allow_origins=origins,
+ allow_credentials=True, # 是否支持 cookie
+ allow_methods=["*"], # 允许所有 HTTP 方法
+ allow_headers=["*"], # 允许所有 HTTP 请求头
+ )
+
def register_router(self, router: APIRouter, prefix: str = ""):
"""注册路由
From c86b043f28d585517843666041bba0488c0fa146 Mon Sep 17 00:00:00 2001
From: 墨梓柒 <1787882683@qq.com>
Date: Sun, 11 May 2025 17:13:43 +0800
Subject: [PATCH 11/20] fix: Ruff
---
src/api/config_api.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 57780ad2a..6c4a9b8e1 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,6 +1,6 @@
from typing import List, Optional, Dict, Any
import strawberry
-from packaging.version import Version
+# from packaging.version import Version
import os
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
From f71242d5714f3b26d179db93e40e70526e8690bf Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
Date: Sun, 11 May 2025 09:13:56 +0000
Subject: [PATCH 12/20] 🤖 Automatically format code [skip ci]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/api/basic_info_api.py | 89 +++++++++++++++++++++++----------------
src/api/config_api.py | 63 +++++++++++++++++++++------
2 files changed, 103 insertions(+), 49 deletions(-)
diff --git a/src/api/basic_info_api.py b/src/api/basic_info_api.py
index 73de18f80..4e5fa4c7d 100644
--- a/src/api/basic_info_api.py
+++ b/src/api/basic_info_api.py
@@ -3,6 +3,7 @@ import psutil
import sys
import os
+
def get_system_info():
"""获取操作系统信息"""
return {
@@ -13,23 +14,27 @@ def get_system_info():
"processor": platform.processor(),
}
+
def get_python_version():
"""获取 Python 版本信息"""
return sys.version
+
def get_cpu_usage():
"""获取系统总CPU使用率"""
return psutil.cpu_percent(interval=1)
+
def get_process_cpu_usage():
"""获取当前进程CPU使用率"""
process = psutil.Process(os.getpid())
return process.cpu_percent(interval=1)
+
def get_memory_usage():
"""获取系统内存使用情况 (单位 MB)"""
mem = psutil.virtual_memory()
- bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) #noqa
+ bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) # noqa
return {
"total_mb": bytes_to_mb(mem.total),
"available_mb": bytes_to_mb(mem.available),
@@ -38,21 +43,23 @@ def get_memory_usage():
"free_mb": bytes_to_mb(mem.free),
}
+
def get_process_memory_usage():
"""获取当前进程内存使用情况 (单位 MB)"""
process = psutil.Process(os.getpid())
mem_info = process.memory_info()
- bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) #noqa
+ bytes_to_mb = lambda x: round(x / (1024 * 1024), 2) # noqa
return {
"rss_mb": bytes_to_mb(mem_info.rss), # Resident Set Size: 实际使用物理内存
"vms_mb": bytes_to_mb(mem_info.vms), # Virtual Memory Size: 虚拟内存大小
- "percent": process.memory_percent() # 进程内存使用百分比
+ "percent": process.memory_percent(), # 进程内存使用百分比
}
+
def get_disk_usage(path="/"):
"""获取指定路径磁盘使用情况 (单位 GB)"""
disk = psutil.disk_usage(path)
- bytes_to_gb = lambda x: round(x / (1024 * 1024 * 1024), 2) #noqa
+ bytes_to_gb = lambda x: round(x / (1024 * 1024 * 1024), 2) # noqa
return {
"total_gb": bytes_to_gb(disk.total),
"used_gb": bytes_to_gb(disk.used),
@@ -60,12 +67,13 @@ def get_disk_usage(path="/"):
"percent": disk.percent,
}
+
def get_all_basic_info():
"""获取所有基本信息并封装返回"""
# 对于进程CPU使用率,需要先初始化
process = psutil.Process(os.getpid())
process.cpu_percent(interval=None) # 初始化调用
- process_cpu = process.cpu_percent(interval=0.1) # 短暂间隔获取
+ process_cpu = process.cpu_percent(interval=0.1) # 短暂间隔获取
return {
"system_info": get_system_info(),
@@ -77,74 +85,83 @@ def get_all_basic_info():
"disk_usage_root": get_disk_usage("/"),
}
+
def get_all_basic_info_string() -> str:
"""获取所有基本信息并以带解释的字符串形式返回"""
info = get_all_basic_info()
-
+
sys_info = info["system_info"]
mem_usage = info["memory_usage"]
proc_mem_usage = info["process_memory_usage"]
disk_usage = info["disk_usage_root"]
# 对进程内存使用百分比进行格式化,保留两位小数
- proc_mem_percent = round(proc_mem_usage['percent'], 2)
+ proc_mem_percent = round(proc_mem_usage["percent"], 2)
output_string = f"""[系统信息]
- - 操作系统: {sys_info['system']} (例如: Windows, Linux)
- - 发行版本: {sys_info['release']} (例如: 11, Ubuntu 20.04)
- - 详细版本: {sys_info['version']}
- - 硬件架构: {sys_info['machine']} (例如: AMD64)
- - 处理器信息: {sys_info['processor']}
+ - 操作系统: {sys_info["system"]} (例如: Windows, Linux)
+ - 发行版本: {sys_info["release"]} (例如: 11, Ubuntu 20.04)
+ - 详细版本: {sys_info["version"]}
+ - 硬件架构: {sys_info["machine"]} (例如: AMD64)
+ - 处理器信息: {sys_info["processor"]}
[Python 环境]
- - Python 版本: {info['python_version']}
+ - Python 版本: {info["python_version"]}
[CPU 状态]
- - 系统总 CPU 使用率: {info['cpu_usage_percent']}%
- - 当前进程 CPU 使用率: {info['process_cpu_usage_percent']}%
+ - 系统总 CPU 使用率: {info["cpu_usage_percent"]}%
+ - 当前进程 CPU 使用率: {info["process_cpu_usage_percent"]}%
[系统内存使用情况]
- - 总物理内存: {mem_usage['total_mb']} MB
- - 可用物理内存: {mem_usage['available_mb']} MB
- - 物理内存使用率: {mem_usage['percent']}%
- - 已用物理内存: {mem_usage['used_mb']} MB
- - 空闲物理内存: {mem_usage['free_mb']} MB
+ - 总物理内存: {mem_usage["total_mb"]} MB
+ - 可用物理内存: {mem_usage["available_mb"]} MB
+ - 物理内存使用率: {mem_usage["percent"]}%
+ - 已用物理内存: {mem_usage["used_mb"]} MB
+ - 空闲物理内存: {mem_usage["free_mb"]} MB
[当前进程内存使用情况]
- - 实际使用物理内存 (RSS): {proc_mem_usage['rss_mb']} MB
- - 占用虚拟内存 (VMS): {proc_mem_usage['vms_mb']} MB
+ - 实际使用物理内存 (RSS): {proc_mem_usage["rss_mb"]} MB
+ - 占用虚拟内存 (VMS): {proc_mem_usage["vms_mb"]} MB
- 进程内存使用率: {proc_mem_percent}%
[磁盘使用情况 (根目录)]
- - 总空间: {disk_usage['total_gb']} GB
- - 已用空间: {disk_usage['used_gb']} GB
- - 可用空间: {disk_usage['free_gb']} GB
- - 磁盘使用率: {disk_usage['percent']}%
+ - 总空间: {disk_usage["total_gb"]} GB
+ - 已用空间: {disk_usage["used_gb"]} GB
+ - 可用空间: {disk_usage["free_gb"]} GB
+ - 磁盘使用率: {disk_usage["percent"]}%
"""
return output_string
-if __name__ == '__main__':
+
+if __name__ == "__main__":
print(f"System Info: {get_system_info()}")
print(f"Python Version: {get_python_version()}")
print(f"CPU Usage: {get_cpu_usage()}%")
# 第一次调用 process.cpu_percent() 会返回0.0或一个无意义的值,需要间隔一段时间再调用
# 或者在初始化Process对象后,先调用一次cpu_percent(interval=None),然后再调用cpu_percent(interval=1)
current_process = psutil.Process(os.getpid())
- current_process.cpu_percent(interval=None) # 初始化
- print(f"Process CPU Usage: {current_process.cpu_percent(interval=1)}%") # 实际获取
-
+ current_process.cpu_percent(interval=None) # 初始化
+ print(f"Process CPU Usage: {current_process.cpu_percent(interval=1)}%") # 实际获取
+
memory_usage_info = get_memory_usage()
- print(f"Memory Usage: Total={memory_usage_info['total_mb']}MB, Used={memory_usage_info['used_mb']}MB, Percent={memory_usage_info['percent']}%")
-
+ print(
+ f"Memory Usage: Total={memory_usage_info['total_mb']}MB, Used={memory_usage_info['used_mb']}MB, Percent={memory_usage_info['percent']}%"
+ )
+
process_memory_info = get_process_memory_usage()
- print(f"Process Memory Usage: RSS={process_memory_info['rss_mb']}MB, VMS={process_memory_info['vms_mb']}MB, Percent={process_memory_info['percent']}%")
-
- disk_usage_info = get_disk_usage('/')
- print(f"Disk Usage (Root): Total={disk_usage_info['total_gb']}GB, Used={disk_usage_info['used_gb']}GB, Percent={disk_usage_info['percent']}%")
+ print(
+ f"Process Memory Usage: RSS={process_memory_info['rss_mb']}MB, VMS={process_memory_info['vms_mb']}MB, Percent={process_memory_info['percent']}%"
+ )
+
+ disk_usage_info = get_disk_usage("/")
+ print(
+ f"Disk Usage (Root): Total={disk_usage_info['total_gb']}GB, Used={disk_usage_info['used_gb']}GB, Percent={disk_usage_info['percent']}%"
+ )
print("\n--- All Basic Info (JSON) ---")
all_info = get_all_basic_info()
import json
+
print(json.dumps(all_info, indent=4, ensure_ascii=False))
print("\n--- All Basic Info (String with Explanations) ---")
diff --git a/src/api/config_api.py b/src/api/config_api.py
index 6c4a9b8e1..275938045 100644
--- a/src/api/config_api.py
+++ b/src/api/config_api.py
@@ -1,5 +1,6 @@
from typing import List, Optional, Dict, Any
import strawberry
+
# from packaging.version import Version
import os
@@ -158,10 +159,25 @@ class APIBotConfig:
"""
# 检查主层级
required_sections = [
- "inner", "bot", "groups", "personality", "identity", "schedule",
- "platforms", "chat", "normal_chat", "focus_chat", "emoji", "memory",
- "mood", "keywords_reaction", "chinese_typo", "response_splitter",
- "remote", "experimental", "model"
+ "inner",
+ "bot",
+ "groups",
+ "personality",
+ "identity",
+ "schedule",
+ "platforms",
+ "chat",
+ "normal_chat",
+ "focus_chat",
+ "emoji",
+ "memory",
+ "mood",
+ "keywords_reaction",
+ "chinese_typo",
+ "response_splitter",
+ "remote",
+ "experimental",
+ "model",
]
for section in required_sections:
if section not in config:
@@ -193,9 +209,15 @@ class APIBotConfig:
# 检查模型配置
model_keys = [
- "llm_reasoning", "llm_normal", "llm_topic_judge", "llm_summary",
- "vlm", "llm_heartflow", "llm_observation", "llm_sub_heartflow",
- "embedding"
+ "llm_reasoning",
+ "llm_normal",
+ "llm_topic_judge",
+ "llm_summary",
+ "vlm",
+ "llm_heartflow",
+ "llm_observation",
+ "llm_sub_heartflow",
+ "embedding",
]
if "model" not in config:
raise KeyError("缺少 [model] 配置段")
@@ -246,8 +268,15 @@ class APIEnvConfig:
:raises: KeyError, TypeError
"""
required_fields = [
- "HOST", "PORT", "PLUGINS", "MONGODB_HOST", "MONGODB_PORT", "DATABASE_NAME",
- "CHAT_ANY_WHERE_BASE_URL", "SILICONFLOW_BASE_URL", "DEEP_SEEK_BASE_URL"
+ "HOST",
+ "PORT",
+ "PLUGINS",
+ "MONGODB_HOST",
+ "MONGODB_PORT",
+ "DATABASE_NAME",
+ "CHAT_ANY_WHERE_BASE_URL",
+ "SILICONFLOW_BASE_URL",
+ "DEEP_SEEK_BASE_URL",
]
for field in required_fields:
if field not in config:
@@ -274,15 +303,23 @@ class APIEnvConfig:
# 可选字段类型检查
optional_str_fields = [
- "DEEP_SEEK_KEY", "CHAT_ANY_WHERE_KEY", "SILICONFLOW_KEY",
- "CONSOLE_LOG_LEVEL", "FILE_LOG_LEVEL",
- "DEFAULT_CONSOLE_LOG_LEVEL", "DEFAULT_FILE_LOG_LEVEL"
+ "DEEP_SEEK_KEY",
+ "CHAT_ANY_WHERE_KEY",
+ "SILICONFLOW_KEY",
+ "CONSOLE_LOG_LEVEL",
+ "FILE_LOG_LEVEL",
+ "DEFAULT_CONSOLE_LOG_LEVEL",
+ "DEFAULT_FILE_LOG_LEVEL",
]
for field in optional_str_fields:
if field in config and config[field] is not None and not isinstance(config[field], str):
raise TypeError(f"{field} 必须为字符串或None")
- if "SIMPLE_OUTPUT" in config and config["SIMPLE_OUTPUT"] is not None and not isinstance(config["SIMPLE_OUTPUT"], bool):
+ if (
+ "SIMPLE_OUTPUT" in config
+ and config["SIMPLE_OUTPUT"] is not None
+ and not isinstance(config["SIMPLE_OUTPUT"], bool)
+ ):
raise TypeError("SIMPLE_OUTPUT 必须为布尔值或None")
# 检查通过
From 08da999105e9a3ff27c42511865be0542bae48bd Mon Sep 17 00:00:00 2001
From: UnCLAS-Prommer
Date: Sun, 11 May 2025 20:13:09 +0800
Subject: [PATCH 13/20] Complete requirements.txt
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
requirements.txt | Bin 760 -> 774 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 8779b40e12d8f03dfe833f5736203c1720685508..12c557ded603e4fab6a0ce71154c4165229be4ae 100644
GIT binary patch
delta 22
dcmeyt+Qzow2NNF`LjglELn%WELncEG0{~H}1?>O;
delta 7
OcmZo;`@y>52NM7c!viz`
From 248eea20bb2926b664c6ca6006dcb43eeb6247ce Mon Sep 17 00:00:00 2001
From: 墨梓柒 <1787882683@qq.com>
Date: Mon, 12 May 2025 22:06:56 +0800
Subject: [PATCH 14/20] fix: mysterious heartflow bug
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/heart_flow/sub_heartflow.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/heart_flow/sub_heartflow.py b/src/heart_flow/sub_heartflow.py
index e2a36dbd7..5be0d73cd 100644
--- a/src/heart_flow/sub_heartflow.py
+++ b/src/heart_flow/sub_heartflow.py
@@ -250,7 +250,7 @@ class SubHeartflow:
elif new_state == ChatState.ABSENT:
logger.info(f"{log_prefix} 进入 ABSENT 状态,停止所有聊天活动...")
- await self.clear_interest_dict()
+ self.clear_interest_dict()
await self._stop_normal_chat()
await self._stop_heart_fc_chat()
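
The one-line fix drops an await on clear_interest_dict, presumably because that method is a plain synchronous function: awaiting its None return value fails at runtime. A minimal reproduction of the failure mode, with illustrative names:

    import asyncio

    class Demo:
        def __init__(self):
            self.interest_dict = {"msg": 0.9}

        def clear_interest_dict(self):  # synchronous: returns None, not a coroutine
            self.interest_dict.clear()

    async def main():
        d = Demo()
        d.clear_interest_dict()        # correct: plain call
        await d.clear_interest_dict()  # TypeError: object NoneType can't be used in 'await' expression

    asyncio.run(main())
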
From b2b43c140f5b947a54def45627bcc698a0c5f610 Mon Sep 17 00:00:00 2001
From: 墨梓柒 <1787882683@qq.com>
Date: Mon, 12 May 2025 22:15:12 +0800
Subject: [PATCH 15/20] Fix mysterious bug x2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/heartFC_chat/normal_chat.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/src/plugins/heartFC_chat/normal_chat.py b/src/plugins/heartFC_chat/normal_chat.py
index e921f85ce..be05f4d70 100644
--- a/src/plugins/heartFC_chat/normal_chat.py
+++ b/src/plugins/heartFC_chat/normal_chat.py
@@ -352,6 +352,8 @@ class NormalChat:
# --- 新增:处理初始高兴趣消息的私有方法 ---
async def _process_initial_interest_messages(self):
"""处理启动时存在于 interest_dict 中的高兴趣消息。"""
+ if not self.interest_dict:
+ return # 如果 interest_dict 为 None或空,直接返回
items_to_process = list(self.interest_dict.items())
if not items_to_process:
return # 没有初始消息,直接返回
From 46d15b1fe769bf2a0795f0eb85c508f35dd01363 Mon Sep 17 00:00:00 2001
From: Oct-autumn
Date: Wed, 7 May 2025 18:20:26 +0800
Subject: [PATCH 16/20] feat: add an async task manager and a local store manager, refactor the statistics module
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
bot.py | 43 +-
src/main.py | 19 +-
src/manager/async_task_manager.py | 150 ++++++
src/manager/local_store_manager.py | 67 +++
src/plugins/utils/statistic.py | 778 ++++++++++++++++++-----------
5 files changed, 725 insertions(+), 332 deletions(-)
create mode 100644 src/manager/async_task_manager.py
create mode 100644 src/manager/local_store_manager.py
diff --git a/bot.py b/bot.py
index 8cecff756..3737279d3 100644
--- a/bot.py
+++ b/bot.py
@@ -1,7 +1,6 @@
import asyncio
import hashlib
import os
-import shutil
import sys
from pathlib import Path
import time
@@ -15,6 +14,8 @@ from src.common.crash_logger import install_crash_handler
from src.main import MainSystem
from rich.traceback import install
+from src.manager.async_task_manager import async_task_manager
+
install(extra_lines=3)
# 设置工作目录为脚本所在目录
@@ -64,38 +65,6 @@ def easter_egg():
print(rainbow_text)
-def init_config():
- # 初次启动检测
- if not os.path.exists("config/bot_config.toml"):
- logger.warning("检测到bot_config.toml不存在,正在从模板复制")
-
- # 检查config目录是否存在
- if not os.path.exists("config"):
- os.makedirs("config")
- logger.info("创建config目录")
-
- shutil.copy("template/bot_config_template.toml", "config/bot_config.toml")
- logger.info("复制完成,请修改config/bot_config.toml和.env中的配置后重新启动")
- if not os.path.exists("config/lpmm_config.toml"):
- logger.warning("检测到lpmm_config.toml不存在,正在从模板复制")
-
- # 检查config目录是否存在
- if not os.path.exists("config"):
- os.makedirs("config")
- logger.info("创建config目录")
-
- shutil.copy("template/lpmm_config_template.toml", "config/lpmm_config.toml")
- logger.info("复制完成,请修改config/lpmm_config.toml和.env中的配置后重新启动")
-
-
-def init_env():
- # 检测.env文件是否存在
- if not os.path.exists(".env"):
- logger.error("检测到.env文件不存在")
- shutil.copy("template/template.env", "./.env")
- logger.info("已从template/template.env复制创建.env,请修改配置后重新启动")
-
-
def load_env():
# 直接加载生产环境变量配置
if os.path.exists(".env"):
@@ -140,6 +109,10 @@ def scan_provider(env_config: dict):
async def graceful_shutdown():
try:
logger.info("正在优雅关闭麦麦...")
+
+ # 停止所有异步任务
+ await async_task_manager.stop_and_wait_all_tasks()
+
tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
for task in tasks:
task.cancel()
@@ -235,9 +208,9 @@ def raw_main():
check_eula()
print("检查EULA和隐私条款完成")
+
easter_egg()
- init_config()
- init_env()
+
load_env()
env_config = {key: os.getenv(key) for key in os.environ}
diff --git a/src/main.py b/src/main.py
index be71524e2..ef0828918 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,6 +1,8 @@
import asyncio
import time
-from .plugins.utils.statistic import LLMStatistics
+
+from .manager.async_task_manager import async_task_manager
+from .plugins.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
from .plugins.moods.moods import MoodManager
from .plugins.schedule.schedule_generator import bot_schedule
from .plugins.emoji_system.emoji_manager import emoji_manager
@@ -26,11 +28,13 @@ logger = get_logger("main")
class MainSystem:
+ mood_manager: MoodManager
+ hippocampus_manager: HippocampusManager
+ individuality: Individuality
+
def __init__(self):
- self.llm_stats = LLMStatistics("llm_statistics.txt")
self.mood_manager = MoodManager.get_instance()
self.hippocampus_manager = HippocampusManager.get_instance()
- self._message_manager_started = False
self.individuality = Individuality.get_instance()
# 使用消息API替代直接的FastAPI实例
@@ -51,9 +55,12 @@ class MainSystem:
async def _init_components(self):
"""初始化其他组件"""
init_start_time = time.time()
- # 启动LLM统计
- self.llm_stats.start()
- logger.success("LLM统计功能启动成功")
+
+ # 添加在线时间统计任务
+ await async_task_manager.add_task(OnlineTimeRecordTask())
+
+ # 添加统计信息输出任务
+ await async_task_manager.add_task(StatisticOutputTask())
# 启动API服务器
start_api_server()
diff --git a/src/manager/async_task_manager.py b/src/manager/async_task_manager.py
new file mode 100644
index 000000000..6a3f1b813
--- /dev/null
+++ b/src/manager/async_task_manager.py
@@ -0,0 +1,150 @@
+from abc import abstractmethod
+
+import asyncio
+from asyncio import Task, Event, Lock
+from typing import Callable, Dict
+
+from src.common.logger_manager import get_logger
+
+logger = get_logger("async_task_manager")
+
+
+class AsyncTask:
+ """异步任务基类"""
+
+ def __init__(self, task_name: str | None = None, wait_before_start: int = 0, run_interval: int = 0):
+ self.task_name: str = task_name or self.__class__.__name__
+ """任务名称"""
+
+ self.wait_before_start: int = wait_before_start
+        """运行任务前等待的时间(单位:秒,设为0则不等待)"""
+
+ self.run_interval: int = run_interval
+ """多次运行的时间间隔(单位:秒,设为0则仅运行一次)"""
+
+ @abstractmethod
+ async def run(self):
+ """
+ 任务的执行过程
+ """
+ pass
+
+ async def start_task(self, abort_flag: asyncio.Event):
+ if self.wait_before_start > 0:
+ # 等待指定时间后开始任务
+ await asyncio.sleep(self.wait_before_start)
+
+ while not abort_flag.is_set():
+ await self.run()
+ if self.run_interval > 0:
+ await asyncio.sleep(self.run_interval)
+ else:
+ break
+
+
+class AsyncTaskManager:
+ """异步任务管理器"""
+
+ def __init__(self):
+ self.tasks: Dict[str, Task] = {}
+ """任务列表"""
+
+ self.abort_flag: Event = Event()
+ """是否中止任务标志"""
+
+ self._lock: Lock = Lock()
+ """异步锁,当可能出现await时需要加锁"""
+
+ def _remove_task_call_back(self, task: Task):
+ """
+ call_back: 任务完成后移除任务
+ """
+ task_name = task.get_name()
+ if task_name in self.tasks:
+ # 任务完成后移除任务
+ del self.tasks[task_name]
+ logger.debug(f"已移除任务 '{task_name}'")
+ else:
+ logger.warning(f"尝试移除不存在的任务 '{task_name}'")
+
+ @staticmethod
+ def _default_finish_call_back(task: Task):
+ """
+ call_back: 默认的任务完成回调函数
+ """
+ try:
+ task.result()
+ logger.debug(f"任务 '{task.get_name()}' 完成")
+ except asyncio.CancelledError:
+ logger.debug(f"任务 '{task.get_name()}' 被取消")
+ except Exception as e:
+ logger.error(f"任务 '{task.get_name()}' 执行时发生异常: {e}", exc_info=True)
+
+ async def add_task(self, task: AsyncTask, call_back: Callable[[asyncio.Task], None] | None = None):
+ """
+ 添加任务
+ """
+ if not issubclass(task.__class__, AsyncTask):
+ raise TypeError(f"task '{task.__class__.__name__}' 必须是继承 AsyncTask 的子类")
+
+ with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
+ if task.task_name in self.tasks:
+ logger.warning(f"已存在名称为 '{task.task_name}' 的任务,正在尝试取消并替换")
+ self.tasks[task.task_name].cancel() # 取消已存在的任务
+                try:
+                    await self.tasks[task.task_name]  # 等待任务完成
+                except asyncio.CancelledError:
+                    pass  # 被取消的任务在此结束,属预期情况
+                logger.info(f"成功结束任务 '{task.task_name}'")
+
+ # 创建新任务
+ task_inst = asyncio.create_task(task.start_task(self.abort_flag))
+ task_inst.set_name(task.task_name)
+ task_inst.add_done_callback(self._remove_task_call_back) # 添加完成回调函数-完成任务后自动移除任务
+ task_inst.add_done_callback(
+ call_back or self._default_finish_call_back
+ ) # 添加完成回调函数-用户自定义,或默认的FallBack
+
+ self.tasks[task.task_name] = task_inst # 将任务添加到任务列表
+ logger.info(f"已启动任务 '{task.task_name}'")
+
+ def get_tasks_status(self) -> Dict[str, Dict[str, str]]:
+ """
+ 获取所有任务的状态
+ """
+ tasks_status = {}
+ for task_name, task in self.tasks.items():
+ tasks_status[task_name] = {
+ "status": "running" if not task.done() else "done",
+ }
+ return tasks_status
+
+ async def stop_and_wait_all_tasks(self):
+ """
+ 终止所有任务并等待它们完成(该方法会阻塞其它尝试add_task()的操作)
+ """
+ with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
+ # 设置中止标志
+ self.abort_flag.set()
+ # 取消所有任务
+ for name, inst in self.tasks.items():
+ try:
+ inst.cancel()
+ except asyncio.CancelledError:
+ logger.info(f"已取消任务 '{name}'")
+
+ # 等待所有任务完成
+            for task_name, task_inst in list(self.tasks.items()):  # 复制一份,避免done回调在等待期间修改字典
+ if not task_inst.done():
+ try:
+ await task_inst
+ except asyncio.CancelledError: # 此处再次捕获取消异常,防止stop_all_tasks()时延迟抛出异常
+ logger.info(f"任务 {task_name} 已取消")
+ except Exception as e:
+                        logger.error(f"任务 {task_name} 执行时发生异常: {e}", exc_info=True)
+
+ # 清空任务列表
+ self.tasks.clear()
+ self.abort_flag.clear()
+ logger.info("所有异步任务已停止")
+
+
+async_task_manager = AsyncTaskManager()
+"""全局异步任务管理器实例"""
diff --git a/src/manager/local_store_manager.py b/src/manager/local_store_manager.py
new file mode 100644
index 000000000..f172d8890
--- /dev/null
+++ b/src/manager/local_store_manager.py
@@ -0,0 +1,67 @@
+import json
+import os
+
+from src.common.logger_manager import get_logger
+
+LOCAL_STORE_FILE_PATH = "data/local_store.json"
+
+logger = get_logger("local_storage")
+
+
+class LocalStoreManager:
+ file_path: str
+ """本地存储路径"""
+
+ store: dict[str, str | list | dict | int | float | bool]
+ """本地存储数据"""
+
+ def __init__(self, local_store_path: str | None = None):
+ self.file_path = local_store_path or LOCAL_STORE_FILE_PATH
+ self.store = {}
+ self.load_local_store()
+
+ def __getitem__(self, item: str) -> str | list | dict | int | float | bool | None:
+ """获取本地存储数据"""
+ return self.store.get(item, None)
+
+ def __setitem__(self, key: str, value: str | list | dict | int | float | bool):
+ """设置本地存储数据"""
+ self.store[key] = value
+ self.save_local_store()
+
+ def __contains__(self, item: str) -> bool:
+ """检查本地存储数据是否存在"""
+ return item in self.store
+
+ def load_local_store(self):
+ """加载本地存储数据"""
+ if os.path.exists(self.file_path):
+ # 存在本地存储文件,加载数据
+ logger.info("正在阅读记事本......我在看,我真的在看!")
+ logger.debug(f"加载本地存储数据: {self.file_path}")
+ try:
+ with open(self.file_path, "r", encoding="utf-8") as f:
+ self.store = json.load(f)
+ logger.success("全都记起来了!")
+ except json.JSONDecodeError:
+ logger.warning("啊咧?记事本被弄脏了,正在重建记事本......")
+ self.store = {}
+ with open(self.file_path, "w", encoding="utf-8") as f:
+ json.dump({}, f, ensure_ascii=False, indent=4)
+ logger.success("记事本重建成功!")
+ else:
+ # 不存在本地存储文件,创建新的目录和文件
+ logger.warning("啊咧?记事本不存在,正在创建新的记事本......")
+ os.makedirs(os.path.dirname(self.file_path), exist_ok=True)
+ with open(self.file_path, "w", encoding="utf-8") as f:
+ json.dump({}, f, ensure_ascii=False, indent=4)
+ logger.success("记事本创建成功!")
+
+ def save_local_store(self):
+ """保存本地存储数据"""
+ logger.debug(f"保存本地存储数据: {self.file_path}")
+ with open(self.file_path, "w", encoding="utf-8") as f:
+ json.dump(self.store, f, ensure_ascii=False, indent=4)
+
+
+local_storage = LocalStoreManager("data/local_store.json") # 全局单例化
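
A short usage sketch of the new store (the key names are hypothetical; any JSON-serialisable value can be stored):

    from src.manager.local_store_manager import LocalStoreManager

    store = LocalStoreManager("data/demo_store.json")   # or use the global `local_storage`

    store["last_startup_ts"] = 1715500000.0   # __setitem__ writes the file immediately
    if "last_startup_ts" in store:            # __contains__
        print(store["last_startup_ts"])       # __getitem__; missing keys return None
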
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index b1660d720..a0de95ec9 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -1,354 +1,550 @@
-import threading
-import time
from collections import defaultdict
from datetime import datetime, timedelta
-from typing import Any, Dict, List
+from typing import Any, Dict, Tuple, List
+
from src.common.logger import get_module_logger
+from src.manager.async_task_manager import AsyncTask
from ...common.database import db
+from src.manager.local_store_manager import local_storage
-logger = get_module_logger("llm_statistics")
+logger = get_module_logger("maibot_statistic")
+
+# 统计数据的键
+TOTAL_REQ_CNT = "total_requests"
+TOTAL_COST = "total_cost"
+REQ_CNT_BY_TYPE = "requests_by_type"
+REQ_CNT_BY_USER = "requests_by_user"
+REQ_CNT_BY_MODEL = "requests_by_model"
+IN_TOK_BY_TYPE = "in_tokens_by_type"
+IN_TOK_BY_USER = "in_tokens_by_user"
+IN_TOK_BY_MODEL = "in_tokens_by_model"
+OUT_TOK_BY_TYPE = "out_tokens_by_type"
+OUT_TOK_BY_USER = "out_tokens_by_user"
+OUT_TOK_BY_MODEL = "out_tokens_by_model"
+TOTAL_TOK_BY_TYPE = "tokens_by_type"
+TOTAL_TOK_BY_USER = "tokens_by_user"
+TOTAL_TOK_BY_MODEL = "tokens_by_model"
+COST_BY_TYPE = "costs_by_type"
+COST_BY_USER = "costs_by_user"
+COST_BY_MODEL = "costs_by_model"
+ONLINE_TIME = "online_time"
+TOTAL_MSG_CNT = "total_messages"
+MSG_CNT_BY_CHAT = "messages_by_chat"
-class LLMStatistics:
- def __init__(self, output_file: str = "llm_statistics.txt"):
- """初始化LLM统计类
+class OnlineTimeRecordTask(AsyncTask):
+ """在线时间记录任务"""
- Args:
- output_file: 统计结果输出文件路径
- """
- self.output_file = output_file
- self.running = False
- self.stats_thread = None
- self.console_thread = None
- self._init_database()
- self.name_dict: Dict[List] = {}
+ def __init__(self):
+ super().__init__(task_name="Online Time Record Task", run_interval=60)
+
+ self.record_id: str | None = None
+ """记录ID"""
+
+ self._init_database() # 初始化数据库
@staticmethod
def _init_database():
- """初始化数据库集合"""
+ """初始化数据库"""
if "online_time" not in db.list_collection_names():
+ # 初始化数据库(在线时长)
db.create_collection("online_time")
- db.online_time.create_index([("timestamp", 1)])
+        # 创建索引(按索引名检查,避免每次启动重复创建)
+        if "end_timestamp_1" not in db.online_time.index_information():
+            db.online_time.create_index([("end_timestamp", 1)])
- def start(self):
- """启动统计线程"""
- if not self.running:
- self.running = True
- # 启动文件统计线程
- self.stats_thread = threading.Thread(target=self._stats_loop)
- self.stats_thread.daemon = True
- self.stats_thread.start()
- # 启动控制台输出线程
- self.console_thread = threading.Thread(target=self._console_output_loop)
- self.console_thread.daemon = True
- self.console_thread.start()
+ async def run(self):
+ try:
+ if self.record_id:
+ # 如果有记录,则更新结束时间
+ db.online_time.update_one(
+ {"_id": self.record_id},
+ {
+ "$set": {
+ "end_timestamp": datetime.now() + timedelta(minutes=1),
+ }
+ },
+ )
+ else:
+ # 如果没有记录,检查一分钟以内是否已有记录
+ current_time = datetime.now()
+ recent_record = db.online_time.find_one(
+ {"end_timestamp": {"$gte": current_time - timedelta(minutes=1)}}
+ )
- def stop(self):
- """停止统计线程"""
- self.running = False
- if self.stats_thread:
- self.stats_thread.join()
- if self.console_thread:
- self.console_thread.join()
+ if not recent_record:
+ # 若没有记录,则插入新的在线时间记录
+ self.record_id = db.online_time.insert_one(
+ {
+ "start_timestamp": current_time,
+ "end_timestamp": current_time + timedelta(minutes=1),
+ }
+ ).inserted_id
+ else:
+ # 如果有记录,则更新结束时间
+ self.record_id = recent_record["_id"]
+ db.online_time.update_one(
+ {"_id": self.record_id},
+ {
+ "$set": {
+ "end_timestamp": current_time + timedelta(minutes=1),
+ }
+ },
+ )
+ except Exception:
+ logger.exception("在线时间记录失败")
- @staticmethod
- def _record_online_time():
- """记录在线时间"""
- current_time = datetime.now()
- # 检查5分钟内是否已有记录
- recent_record = db.online_time.find_one({"timestamp": {"$gte": current_time - timedelta(minutes=5)}})
- if not recent_record:
- db.online_time.insert_one(
- {
- "timestamp": current_time,
- "duration": 5, # 5分钟
- }
+class StatisticOutputTask(AsyncTask):
+ """统计输出任务"""
+
+ SEP_LINE = "-" * 84
+
+ def __init__(self, record_file_path: str = "llm_statistics.txt"):
+ # 延迟300秒启动,运行间隔300秒
+ super().__init__(task_name="Statistics Data Output Task", wait_before_start=300, run_interval=300)
+
+ self.name_mapping: Dict[str, Tuple[str, float]] = {}
+ """
+ 联系人/群聊名称映射 {聊天ID: (联系人/群聊名称, 记录时间(timestamp))}
+ 注:设计记录时间的目的是方便更新名称,使联系人/群聊名称保持最新
+ """
+
+ self.record_file_path: str = record_file_path
+ """
+ 记录文件路径
+ """
+
+ now = datetime.now()
+ self.stat_period: List[Tuple[str, datetime, str]] = [
+ ("all_time", datetime(2000, 1, 1), "自部署以来的"),
+ ("last_7_days", now - timedelta(days=7), "最近7天的"),
+ ("last_24_hours", now - timedelta(days=1), "最近24小时的"),
+ ("last_hour", now - timedelta(hours=1), "最近1小时的"),
+ ]
+ """
+ 统计时间段
+ """
+
+ def _statistic_console_output(self, stats: Dict[str, Any]):
+ """
+ 输出统计数据到控制台
+ """
+ # 输出最近一小时的统计数据
+
+ output = [
+ self.SEP_LINE,
+ f" 最近1小时的统计数据 (详细信息见文件:{self.record_file_path})",
+ self.SEP_LINE,
+ self._format_total_stat(stats["last_hour"]),
+ "",
+ self._format_model_classified_stat(stats["last_hour"]),
+ "",
+ self._format_chat_stat(stats["last_hour"]),
+ self.SEP_LINE,
+ "",
+ ]
+
+ logger.info("\n" + "\n".join(output))
+
+ def _statistic_file_output(self, stats: Dict[str, Any]):
+ """
+ 输出统计数据到文件
+ """
+ output = [f"MaiBot运行统计报告 (生成时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')})", ""]
+
+ def _format_stat_data(title: str, stats_: Dict[str, Any]) -> str:
+ """
+ 格式化统计数据
+ """
+ return "\n".join(
+ [
+ self.SEP_LINE,
+ f" {title}",
+ self.SEP_LINE,
+ self._format_total_stat(stats_),
+ "",
+ self._format_model_classified_stat(stats_),
+ "",
+ self._format_req_type_classified_stat(stats_),
+ "",
+ self._format_user_classified_stat(stats_),
+ "",
+ self._format_chat_stat(stats_),
+ "",
+ ]
)
- def _collect_statistics_for_period(self, start_time: datetime) -> Dict[str, Any]:
- """收集指定时间段的LLM请求统计数据
+ for period_key, period_start_time, period_desc in self.stat_period:
+ if period_key in stats:
+ # 统计数据存在
+ output.append(
+ _format_stat_data(
+ f"{period_desc}统计数据 (自{period_start_time.strftime('%Y-%m-%d %H:%M:%S')}开始)",
+ stats[period_key],
+ )
+ )
- Args:
- start_time: 统计开始时间
+ with open(self.record_file_path, "w", encoding="utf-8") as f:
+ f.write("\n\n".join(output))
+
+ async def run(self):
+ try:
+ # 收集统计数据
+ stats = self._collect_all_statistics()
+
+ # 输出统计数据到控制台
+ self._statistic_console_output(stats)
+ # 输出统计数据到文件
+ self._statistic_file_output(stats)
+ except Exception as e:
+ logger.exception(f"输出统计数据过程中发生异常,错误信息:{e}")
+
+ # -- 以下为统计数据收集方法 --
+
+ @staticmethod
+ def _collect_model_request_for_period(collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
"""
+ 收集指定时间段的LLM请求统计数据
+
+ :param collect_period: 统计时间段
+ """
+ if len(collect_period) <= 0:
+ return {}
+ else:
+ # 排序-按照时间段开始时间降序排列(最晚的时间段在前)
+ collect_period.sort(key=lambda x: x[1], reverse=True)
+
stats = {
- "total_requests": 0,
- "requests_by_type": defaultdict(int),
- "requests_by_user": defaultdict(int),
- "requests_by_model": defaultdict(int),
- "average_tokens": 0,
- "total_tokens": 0,
- "total_cost": 0.0,
- "costs_by_user": defaultdict(float),
- "costs_by_type": defaultdict(float),
- "costs_by_model": defaultdict(float),
- # 新增token统计字段
- "tokens_by_type": defaultdict(int),
- "tokens_by_user": defaultdict(int),
- "tokens_by_model": defaultdict(int),
- # 新增在线时间统计
- "online_time_minutes": 0,
- # 新增消息统计字段
- "total_messages": 0,
- "messages_by_user": defaultdict(int),
- "messages_by_chat": defaultdict(int),
+ period_key: {
+ # 总LLM请求数
+ TOTAL_REQ_CNT: 0,
+ # 请求次数统计
+ REQ_CNT_BY_TYPE: defaultdict(int),
+ REQ_CNT_BY_USER: defaultdict(int),
+ REQ_CNT_BY_MODEL: defaultdict(int),
+ # 输入Token数
+ IN_TOK_BY_TYPE: defaultdict(int),
+ IN_TOK_BY_USER: defaultdict(int),
+ IN_TOK_BY_MODEL: defaultdict(int),
+ # 输出Token数
+ OUT_TOK_BY_TYPE: defaultdict(int),
+ OUT_TOK_BY_USER: defaultdict(int),
+ OUT_TOK_BY_MODEL: defaultdict(int),
+ # 总Token数
+ TOTAL_TOK_BY_TYPE: defaultdict(int),
+ TOTAL_TOK_BY_USER: defaultdict(int),
+ TOTAL_TOK_BY_MODEL: defaultdict(int),
+ # 总开销
+ TOTAL_COST: 0.0,
+ # 请求开销统计
+ COST_BY_TYPE: defaultdict(float),
+ COST_BY_USER: defaultdict(float),
+ COST_BY_MODEL: defaultdict(float),
+ }
+ for period_key, _, _ in collect_period
}
- cursor = db.llm_usage.find({"timestamp": {"$gte": start_time}})
- total_requests = 0
+ # 以最早的时间戳为起始时间获取记录
+ for record in db.llm_usage.find({"timestamp": {"$gte": collect_period[-1][1]}}):
+ record_timestamp = record.get("timestamp")
+ for idx, (_, period_start, _) in enumerate(collect_period):
+ if record_timestamp >= period_start:
+ # 如果记录时间在当前时间段内,则它一定在更早的时间段内
+ # 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
+ for period_key, _, _ in collect_period[idx:]:
+ stats[period_key][TOTAL_REQ_CNT] += 1
- for doc in cursor:
- stats["total_requests"] += 1
- request_type = doc.get("request_type", "unknown")
- user_id = str(doc.get("user_id", "unknown"))
- model_name = doc.get("model_name", "unknown")
+ request_type = record.get("request_type", "unknown") # 请求类型
+ user_id = str(record.get("user_id", "unknown")) # 用户ID
+ model_name = record.get("model_name", "unknown") # 模型名称
- stats["requests_by_type"][request_type] += 1
- stats["requests_by_user"][user_id] += 1
- stats["requests_by_model"][model_name] += 1
+ stats[period_key][REQ_CNT_BY_TYPE][request_type] += 1
+ stats[period_key][REQ_CNT_BY_USER][user_id] += 1
+ stats[period_key][REQ_CNT_BY_MODEL][model_name] += 1
- prompt_tokens = doc.get("prompt_tokens", 0)
- completion_tokens = doc.get("completion_tokens", 0)
- total_tokens = prompt_tokens + completion_tokens
- stats["tokens_by_type"][request_type] += total_tokens
- stats["tokens_by_user"][user_id] += total_tokens
- stats["tokens_by_model"][model_name] += total_tokens
- stats["total_tokens"] += total_tokens
+ prompt_tokens = record.get("prompt_tokens", 0) # 输入Token数
+ completion_tokens = record.get("completion_tokens", 0) # 输出Token数
+ total_tokens = prompt_tokens + completion_tokens # Token总数 = 输入Token数 + 输出Token数
- cost = doc.get("cost", 0.0)
- stats["total_cost"] += cost
- stats["costs_by_user"][user_id] += cost
- stats["costs_by_type"][request_type] += cost
- stats["costs_by_model"][model_name] += cost
+ stats[period_key][IN_TOK_BY_TYPE][request_type] += prompt_tokens
+ stats[period_key][IN_TOK_BY_USER][user_id] += prompt_tokens
+ stats[period_key][IN_TOK_BY_MODEL][model_name] += prompt_tokens
- total_requests += 1
+ stats[period_key][OUT_TOK_BY_TYPE][request_type] += completion_tokens
+ stats[period_key][OUT_TOK_BY_USER][user_id] += completion_tokens
+ stats[period_key][OUT_TOK_BY_MODEL][model_name] += completion_tokens
- if total_requests > 0:
- stats["average_tokens"] = stats["total_tokens"] / total_requests
+ stats[period_key][TOTAL_TOK_BY_TYPE][request_type] += total_tokens
+ stats[period_key][TOTAL_TOK_BY_USER][user_id] += total_tokens
+ stats[period_key][TOTAL_TOK_BY_MODEL][model_name] += total_tokens
+
+ cost = record.get("cost", 0.0)
+ stats[period_key][TOTAL_COST] += cost
+ stats[period_key][COST_BY_TYPE][request_type] += cost
+ stats[period_key][COST_BY_USER][user_id] += cost
+ stats[period_key][COST_BY_MODEL][model_name] += cost
+ break # 取消更早时间段的判断
+
+ return stats
+
+ @staticmethod
+ def _collect_online_time_for_period(collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
+ """
+ 收集指定时间段的在线时间统计数据
+
+ :param collect_period: 统计时间段
+ """
+ if len(collect_period) <= 0:
+ return {}
+ else:
+ # 排序-按照时间段开始时间降序排列(最晚的时间段在前)
+ collect_period.sort(key=lambda x: x[1], reverse=True)
+
+ stats = {
+ period_key: {
+ # 在线时间统计
+ ONLINE_TIME: 0.0,
+ }
+ for period_key, _, _ in collect_period
+ }
# 统计在线时间
- online_time_cursor = db.online_time.find({"timestamp": {"$gte": start_time}})
- for doc in online_time_cursor:
- stats["online_time_minutes"] += doc.get("duration", 0)
+ for record in db.online_time.find({"end_timestamp": {"$gte": collect_period[-1][1]}}):
+ end_timestamp: datetime = record.get("end_timestamp")
+ for idx, (_, period_start, _) in enumerate(collect_period):
+ if end_timestamp >= period_start:
+ # 如果记录时间在当前时间段内,则它一定在更早的时间段内
+ # 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
+ for period_key, _period_start, _ in collect_period[idx:]:
+ start_timestamp: datetime = record.get("start_timestamp")
+ if start_timestamp < _period_start:
+                            # 如果开始时间在查询边界之前,则以该时间段的起始时间作为计算起点
+ stats[period_key][ONLINE_TIME] += (end_timestamp - _period_start).total_seconds() / 60
+ else:
+ # 否则,使用开始时间
+ stats[period_key][ONLINE_TIME] += (end_timestamp - start_timestamp).total_seconds() / 60
+ break # 取消更早时间段的判断
+
+ return stats
+
+ def _collect_message_count_for_period(self, collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
+ """
+ 收集指定时间段的消息统计数据
+
+ :param collect_period: 统计时间段
+ """
+ if len(collect_period) <= 0:
+ return {}
+ else:
+ # 排序-按照时间段开始时间降序排列(最晚的时间段在前)
+ collect_period.sort(key=lambda x: x[1], reverse=True)
+
+ stats = {
+ period_key: {
+ # 消息统计
+ TOTAL_MSG_CNT: 0,
+ MSG_CNT_BY_CHAT: defaultdict(int),
+ }
+ for period_key, _, _ in collect_period
+ }
# 统计消息量
- messages_cursor = db.messages.find({"time": {"$gte": start_time.timestamp()}})
- for doc in messages_cursor:
- stats["total_messages"] += 1
- # user_id = str(doc.get("user_info", {}).get("user_id", "unknown"))
- chat_info = doc.get("chat_info", {})
- user_info = doc.get("user_info", {})
- user_id = str(user_info.get("user_id", "unknown"))
- message_time = doc.get("time", 0)
- group_info = chat_info.get("group_info") if chat_info else {}
- # print(f"group_info: {group_info}")
- group_name = None
- if group_info:
- group_id = f"g{group_info.get('group_id')}"
- group_name = group_info.get("group_name", f"群{group_info.get('group_id')}")
- if user_info and not group_name:
- group_id = f"u{user_info['user_id']}"
- group_name = user_info["user_nickname"]
- if self.name_dict.get(group_id):
- if message_time > self.name_dict.get(group_id)[1]:
- self.name_dict[group_id] = [group_name, message_time]
+ for message in db.messages.find({"time": {"$gte": collect_period[-1][1].timestamp()}}):
+ chat_info = message.get("chat_info", None) # 聊天信息
+ user_info = message.get("user_info", None) # 用户信息(消息发送人)
+ message_time = message.get("time", 0) # 消息时间
+
+ group_info = chat_info.get("group_info") if chat_info else None # 尝试获取群聊信息
+ if group_info is not None:
+ # 若有群聊信息
+ chat_id = f"g{group_info.get('group_id')}"
+ chat_name = group_info.get("group_name", f"群{group_info.get('group_id')}")
+ elif user_info:
+ # 若没有群聊信息,则尝试获取用户信息
+ chat_id = f"u{user_info['user_id']}"
+ chat_name = user_info["user_nickname"]
else:
- self.name_dict[group_id] = [group_name, message_time]
- # print(f"group_name: {group_name}")
- stats["messages_by_user"][user_id] += 1
- stats["messages_by_chat"][group_id] += 1
+ continue # 如果没有群组信息也没有用户信息,则跳过
+
+ if chat_id in self.name_mapping:
+ if chat_name != self.name_mapping[chat_id][0] and message_time > self.name_mapping[chat_id][1]:
+ # 如果用户名称不同,且新消息时间晚于之前记录的时间,则更新用户名称
+ self.name_mapping[chat_id] = (chat_name, message_time)
+ else:
+ self.name_mapping[chat_id] = (chat_name, message_time)
+
+ for idx, (_, period_start, _) in enumerate(collect_period):
+ if message_time >= period_start.timestamp():
+ # 如果记录时间在当前时间段内,则它一定在更早的时间段内
+ # 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
+ for period_key, _, _ in collect_period[idx:]:
+ stats[period_key][TOTAL_MSG_CNT] += 1
+ stats[period_key][MSG_CNT_BY_CHAT][chat_id] += 1
+ break
return stats
def _collect_all_statistics(self) -> Dict[str, Dict[str, Any]]:
- """收集所有时间范围的统计数据"""
+ """
+ 收集各时间段的统计数据
+ """
+
now = datetime.now()
- # 使用2000年1月1日作为"所有时间"的起始时间,这是一个更合理的起始点
- all_time_start = datetime(2000, 1, 1)
- return {
- "all_time": self._collect_statistics_for_period(all_time_start),
- "last_7_days": self._collect_statistics_for_period(now - timedelta(days=7)),
- "last_24_hours": self._collect_statistics_for_period(now - timedelta(days=1)),
- "last_hour": self._collect_statistics_for_period(now - timedelta(hours=1)),
- }
+ last_all_time_stat = None
- def _format_stats_section(self, stats: Dict[str, Any], title: str) -> str:
- """格式化统计部分的输出"""
- output = ["\n" + "-" * 84, f"{title}", "-" * 84, f"总请求数: {stats['total_requests']}"]
+ stat = {period[0]: {} for period in self.stat_period}
- if stats["total_requests"] > 0:
- output.append(f"总Token数: {stats['total_tokens']}")
- output.append(f"总花费: {stats['total_cost']:.4f}¥")
- output.append(f"在线时间: {stats['online_time_minutes']}分钟")
- output.append(f"总消息数: {stats['total_messages']}\n")
+ if "last_full_statistics_timestamp" in local_storage and "last_full_statistics" in local_storage:
+ # 若存有上次完整统计的时间戳,则使用该时间戳作为"所有时间"的起始时间,进行增量统计
+ last_full_stat_ts: float = local_storage["last_full_statistics_timestamp"]
+ last_all_time_stat = local_storage["last_full_statistics"]
+ self.stat_period = [item for item in self.stat_period if item[0] != "all_time"] # 删除"所有时间"的统计时段
+ self.stat_period.append(("all_time", datetime.fromtimestamp(last_full_stat_ts), "自部署以来的"))
- data_fmt = "{:<32} {:>10} {:>14} {:>13.4f} ¥"
+ model_req_stat = self._collect_model_request_for_period(self.stat_period)
+ online_time_stat = self._collect_online_time_for_period(self.stat_period)
+ message_count_stat = self._collect_message_count_for_period(self.stat_period)
+
+ # 统计数据合并
+ # 合并三类统计数据
+ for period_key, _, _ in self.stat_period:
+ stat[period_key].update(model_req_stat[period_key])
+ stat[period_key].update(online_time_stat[period_key])
+ stat[period_key].update(message_count_stat[period_key])
+
+ if last_all_time_stat:
+ # 若存在上次完整统计数据,则将其与当前统计数据合并
+ for key, val in last_all_time_stat.items():
+ if isinstance(val, dict):
+ # 是字典类型,则进行合并
+ for sub_key, sub_val in val.items():
+ stat["all_time"][key][sub_key] += sub_val
+ else:
+ # 直接合并
+ stat["all_time"][key] += val
+
+ # 更新上次完整统计数据的时间戳
+ local_storage["last_full_statistics_timestamp"] = now.timestamp()
+ # 更新上次完整统计数据
+ local_storage["last_full_statistics"] = stat["all_time"]
+
+ return stat
+
+ # -- 以下为统计数据格式化方法 --
+
+ @staticmethod
+ def _format_total_stat(stats: Dict[str, Any]) -> str:
+ """
+ 格式化总统计数据
+ """
+ output = [
+ f"总在线时间: {stats[ONLINE_TIME]:.1f}分钟",
+ f"总消息数: {stats[TOTAL_MSG_CNT]}",
+ f"总请求数: {stats[TOTAL_REQ_CNT]}",
+ f"总花费: {stats[TOTAL_COST]:.4f}¥",
+ "",
+ ]
+
+ return "\n".join(output)
+
+ @staticmethod
+ def _format_model_classified_stat(stats: Dict[str, Any]) -> str:
+ """
+ 格式化按模型分类的统计数据
+ """
+ if stats[TOTAL_REQ_CNT] > 0:
+ data_fmt = "{:<32} {:>10} {:>12} {:>12} {:>12} {:>9.4f}¥"
+
+ output = [
+ "按模型分类统计:",
+ " 模型名称 调用次数 输入Token 输出Token Token总量 累计花费",
+ ]
+ for model_name, count in sorted(stats[REQ_CNT_BY_MODEL].items()):
+ name = model_name[:29] + "..." if len(model_name) > 32 else model_name
+ in_tokens = stats[IN_TOK_BY_MODEL][model_name]
+ out_tokens = stats[OUT_TOK_BY_MODEL][model_name]
+ tokens = stats[TOTAL_TOK_BY_MODEL][model_name]
+ cost = stats[COST_BY_MODEL][model_name]
+ output.append(data_fmt.format(name, count, in_tokens, out_tokens, tokens, cost))
- # 按模型统计
- output.append("按模型统计:")
- output.append("模型名称 调用次数 Token总量 累计花费")
- for model_name, count in sorted(stats["requests_by_model"].items()):
- tokens = stats["tokens_by_model"][model_name]
- cost = stats["costs_by_model"][model_name]
- output.append(
- data_fmt.format(model_name[:30] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
- )
output.append("")
+ return "\n".join(output)
+ else:
+ return ""
+ @staticmethod
+ def _format_req_type_classified_stat(stats: Dict[str, Any]) -> str:
+ """
+ 格式化按请求类型分类的统计数据
+ """
+ if stats[TOTAL_REQ_CNT] > 0:
# 按请求类型统计
- output.append("按请求类型统计:")
- output.append("模型名称 调用次数 Token总量 累计花费")
- for req_type, count in sorted(stats["requests_by_type"].items()):
- tokens = stats["tokens_by_type"][req_type]
- cost = stats["costs_by_type"][req_type]
- output.append(
- data_fmt.format(req_type[:22] + ".." if len(req_type) > 24 else req_type, count, tokens, cost)
- )
- output.append("")
+ data_fmt = "{:<32} {:>10} {:>12} {:>12} {:>12} {:>9.4f}¥"
+ output = [
+ "按请求类型分类统计:",
+ " 请求类型 调用次数 输入Token 输出Token Token总量 累计花费",
+ ]
+ for req_type, count in sorted(stats[REQ_CNT_BY_TYPE].items()):
+ name = req_type[:29] + "..." if len(req_type) > 32 else req_type
+ in_tokens = stats[IN_TOK_BY_TYPE][req_type]
+ out_tokens = stats[OUT_TOK_BY_TYPE][req_type]
+ tokens = stats[TOTAL_TOK_BY_TYPE][req_type]
+ cost = stats[COST_BY_TYPE][req_type]
+ output.append(data_fmt.format(name, count, in_tokens, out_tokens, tokens, cost))
+
+ output.append("")
+ return "\n".join(output)
+ else:
+ return ""
+
+ @staticmethod
+ def _format_user_classified_stat(stats: Dict[str, Any]) -> str:
+ """
+ 格式化按用户分类的统计数据
+ """
+ if stats[TOTAL_REQ_CNT] > 0:
# 修正用户统计列宽
- output.append("按用户统计:")
- output.append("用户ID 调用次数 Token总量 累计花费")
- for user_id, count in sorted(stats["requests_by_user"].items()):
- tokens = stats["tokens_by_user"][user_id]
- cost = stats["costs_by_user"][user_id]
+ data_fmt = "{:<32} {:>10} {:>12} {:>12} {:>12} {:>9.4f}¥"
+
+ output = [
+ "按用户分类统计:",
+ " 用户名称 调用次数 输入Token 输出Token Token总量 累计花费",
+ ]
+ for user_id, count in sorted(stats[REQ_CNT_BY_USER].items()):
+ in_tokens = stats[IN_TOK_BY_USER][user_id]
+ out_tokens = stats[OUT_TOK_BY_USER][user_id]
+ tokens = stats[TOTAL_TOK_BY_USER][user_id]
+ cost = stats[COST_BY_USER][user_id]
output.append(
data_fmt.format(
user_id[:22], # 不再添加省略号,保持原始ID
count,
+ in_tokens,
+ out_tokens,
tokens,
cost,
)
)
+
output.append("")
+ return "\n".join(output)
+ else:
+ return ""
- # 添加聊天统计
- output.append("群组统计:")
- output.append("群组名称 消息数量")
- for group_id, count in sorted(stats["messages_by_chat"].items()):
- output.append(f"{self.name_dict[group_id][0][:32]:<32} {count:>10}")
+ def _format_chat_stat(self, stats: Dict[str, Any]) -> str:
+ """
+ 格式化聊天统计数据
+ """
+ if stats[TOTAL_MSG_CNT] > 0:
+ output = ["聊天消息统计:", " 联系人/群组名称 消息数量"]
+ for chat_id, count in sorted(stats[MSG_CNT_BY_CHAT].items()):
+ output.append(f"{self.name_mapping[chat_id][0][:32]:<32} {count:>10}")
- return "\n".join(output)
-
- def _format_stats_section_lite(self, stats: Dict[str, Any], title: str) -> str:
- """格式化统计部分的输出"""
- output = ["\n" + "-" * 84, f"{title}", "-" * 84]
-
- # output.append(f"总请求数: {stats['total_requests']}")
- if stats["total_requests"] > 0:
- # output.append(f"总Token数: {stats['total_tokens']}")
- output.append(f"总花费: {stats['total_cost']:.4f}¥")
- # output.append(f"在线时间: {stats['online_time_minutes']}分钟")
- output.append(f"总消息数: {stats['total_messages']}\n")
-
- data_fmt = "{:<32} {:>10} {:>14} {:>13.4f} ¥"
-
- # 按模型统计
- output.append("按模型统计:")
- output.append("模型名称 调用次数 Token总量 累计花费")
- for model_name, count in sorted(stats["requests_by_model"].items()):
- tokens = stats["tokens_by_model"][model_name]
- cost = stats["costs_by_model"][model_name]
- output.append(
- data_fmt.format(model_name[:30] + ".." if len(model_name) > 32 else model_name, count, tokens, cost)
- )
output.append("")
-
- # 按请求类型统计
- # output.append("按请求类型统计:")
- # output.append(("模型名称 调用次数 Token总量 累计花费"))
- # for req_type, count in sorted(stats["requests_by_type"].items()):
- # tokens = stats["tokens_by_type"][req_type]
- # cost = stats["costs_by_type"][req_type]
- # output.append(
- # data_fmt.format(req_type[:22] + ".." if len(req_type) > 24 else req_type, count, tokens, cost)
- # )
- # output.append("")
-
- # 修正用户统计列宽
- # output.append("按用户统计:")
- # output.append(("用户ID 调用次数 Token总量 累计花费"))
- # for user_id, count in sorted(stats["requests_by_user"].items()):
- # tokens = stats["tokens_by_user"][user_id]
- # cost = stats["costs_by_user"][user_id]
- # output.append(
- # data_fmt.format(
- # user_id[:22], # 不再添加省略号,保持原始ID
- # count,
- # tokens,
- # cost,
- # )
- # )
- # output.append("")
-
- # 添加聊天统计
- output.append("群组统计:")
- output.append("群组名称 消息数量")
- for group_id, count in sorted(stats["messages_by_chat"].items()):
- output.append(f"{self.name_dict[group_id][0][:32]:<32} {count:>10}")
-
- return "\n".join(output)
-
- def _save_statistics(self, all_stats: Dict[str, Dict[str, Any]]):
- """将统计结果保存到文件"""
- current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-
- output = [f"LLM请求统计报告 (生成时间: {current_time})"]
-
- # 添加各个时间段的统计
- sections = [
- ("所有时间统计", "all_time"),
- ("最近7天统计", "last_7_days"),
- ("最近24小时统计", "last_24_hours"),
- ("最近1小时统计", "last_hour"),
- ]
-
- for title, key in sections:
- output.append(self._format_stats_section(all_stats[key], title))
-
- # 写入文件
- with open(self.output_file, "w", encoding="utf-8") as f:
- f.write("\n".join(output))
-
- def _console_output_loop(self):
- """控制台输出循环,每5分钟输出一次最近1小时的统计"""
- while self.running:
- # 等待5分钟
- for _ in range(300): # 5分钟 = 300秒
- if not self.running:
- break
- time.sleep(1)
- try:
- # 收集最近1小时的统计数据
- now = datetime.now()
- hour_stats = self._collect_statistics_for_period(now - timedelta(hours=1))
-
- # 使用logger输出
- stats_output = self._format_stats_section_lite(
- hour_stats, "最近1小时统计:详细信息见根目录文件:llm_statistics.txt"
- )
- logger.info("\n" + stats_output + "\n" + "=" * 50)
-
- except Exception:
- logger.exception("控制台统计数据输出失败")
-
- def _stats_loop(self):
- """统计循环,每5分钟运行一次"""
- while self.running:
- try:
- # 记录在线时间
- self._record_online_time()
- # 收集并保存统计数据
- all_stats = self._collect_all_statistics()
- self._save_statistics(all_stats)
- except Exception:
- logger.exception("统计数据处理失败")
-
- # 等待5分钟
- for _ in range(300): # 5分钟 = 300秒
- if not self.running:
- break
- time.sleep(1)
+ return "\n".join(output)
+ else:
+ return ""
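
The three _collect_*_for_period helpers above share one bucketing trick: the periods are sorted by start time in descending order (shortest window first), each record is matched against the first period whose start it is newer than, and that period plus every longer period after it in the list are updated in a single pass. A standalone sketch of the idea (illustrative only, simplified to plain counts):

    from collections import defaultdict
    from datetime import datetime, timedelta

    def bucket_counts(periods, record_times):
        """periods: list of (key, start_datetime); record_times: list of datetime."""
        periods = sorted(periods, key=lambda p: p[1], reverse=True)  # shortest window first
        counts = defaultdict(int)
        for ts in record_times:
            for idx, (_, start) in enumerate(periods):
                if ts >= start:
                    # the record also falls inside every longer period after this one
                    for key, _ in periods[idx:]:
                        counts[key] += 1
                    break
        return counts

    now = datetime.now()
    periods = [("last_hour", now - timedelta(hours=1)), ("last_24_hours", now - timedelta(days=1))]
    print(dict(bucket_counts(periods, [now - timedelta(minutes=10), now - timedelta(hours=5)])))
    # {'last_hour': 1, 'last_24_hours': 2}
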
From a1fbff1d6b7c83c818221ee4295e392a5ba8588a Mon Sep 17 00:00:00 2001
From: Oct-autumn
Date: Thu, 8 May 2025 23:56:28 +0800
Subject: [PATCH 17/20] =?UTF-8?q?refactor:=20=E9=87=8D=E6=9E=84=E6=83=85?=
=?UTF-8?q?=E7=BB=AA=E7=AE=A1=E7=90=86=E5=99=A8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/do_tool/not_used/change_mood.py | 11 +-
src/heart_flow/chat_state_info.py | 6 +-
src/heart_flow/mai_state_manager.py | 6 +-
src/main.py | 33 +-
src/manager/async_task_manager.py | 4 +-
src/manager/mood_manager.py | 296 ++++++++++++++++++
src/plugins/__init__.py | 2 -
src/plugins/chat/bot.py | 17 +-
src/plugins/chat/utils.py | 14 +-
src/plugins/heartFC_chat/heartFC_chat.py | 46 +--
.../heartFC_chat/heartflow_prompt_builder.py | 5 +-
src/plugins/heartFC_chat/normal_chat.py | 26 +-
src/plugins/moods/moods.py | 293 -----------------
.../person_info/relationship_manager.py | 11 +-
14 files changed, 379 insertions(+), 391 deletions(-)
create mode 100644 src/manager/mood_manager.py
delete mode 100644 src/plugins/moods/moods.py
diff --git a/src/do_tool/not_used/change_mood.py b/src/do_tool/not_used/change_mood.py
index 5dee6ac98..5d1e7f7a6 100644
--- a/src/do_tool/not_used/change_mood.py
+++ b/src/do_tool/not_used/change_mood.py
@@ -1,10 +1,10 @@
-from src.do_tool.tool_can_use.base_tool import BaseTool
-from src.config.config import global_config
-from src.common.logger_manager import get_logger
-from src.plugins.moods.moods import MoodManager
-
from typing import Any
+from src.common.logger_manager import get_logger
+from src.config.config import global_config
+from src.do_tool.tool_can_use.base_tool import BaseTool
+from src.manager.mood_manager import mood_manager
+
logger = get_logger("change_mood_tool")
@@ -36,7 +36,6 @@ class ChangeMoodTool(BaseTool):
response_set = function_args.get("response_set")
_message_processed_plain_text = function_args.get("text")
- mood_manager = MoodManager.get_instance()
# gpt = ResponseGenerator()
if response_set is None:
diff --git a/src/heart_flow/chat_state_info.py b/src/heart_flow/chat_state_info.py
index 619f372fc..bda5c26c0 100644
--- a/src/heart_flow/chat_state_info.py
+++ b/src/heart_flow/chat_state_info.py
@@ -1,4 +1,4 @@
-from src.plugins.moods.moods import MoodManager
+from src.manager.mood_manager import mood_manager
import enum
@@ -13,5 +13,5 @@ class ChatStateInfo:
self.chat_status: ChatState = ChatState.ABSENT
self.current_state_time = 120
- self.mood_manager = MoodManager()
- self.mood = self.mood_manager.get_prompt()
+ self.mood_manager = mood_manager
+ self.mood = self.mood_manager.get_mood_prompt()
diff --git a/src/heart_flow/mai_state_manager.py b/src/heart_flow/mai_state_manager.py
index d289a94a1..3c6c19d66 100644
--- a/src/heart_flow/mai_state_manager.py
+++ b/src/heart_flow/mai_state_manager.py
@@ -3,7 +3,7 @@ import time
import random
from typing import List, Tuple, Optional
from src.common.logger_manager import get_logger
-from src.plugins.moods.moods import MoodManager
+from src.manager.mood_manager import mood_manager
from src.config.config import global_config
logger = get_logger("mai_state")
@@ -88,7 +88,7 @@ class MaiStateInfo:
self.last_min_check_time: float = time.time() # 上次1分钟规则检查时间
# Mood management is now part of MaiStateInfo
- self.mood_manager = MoodManager.get_instance() # Use singleton instance
+ self.mood_manager = mood_manager # Use singleton instance
def update_mai_status(self, new_status: MaiState) -> bool:
"""
@@ -124,7 +124,7 @@ class MaiStateInfo:
def get_mood_prompt(self) -> str:
"""获取当前的心情提示词"""
# Delegate to the internal mood manager
- return self.mood_manager.get_prompt()
+ return self.mood_manager.get_mood_prompt()
def get_current_state(self) -> MaiState:
"""获取当前的 MaiState"""
diff --git a/src/main.py b/src/main.py
index ef0828918..fbb40e3c5 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,9 +1,11 @@
import asyncio
import time
+from maim_message import MessageServer
+
from .manager.async_task_manager import async_task_manager
from .plugins.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
-from .plugins.moods.moods import MoodManager
+from src.manager.mood_manager import MoodPrintTask, MoodUpdateTask
from .plugins.schedule.schedule_generator import bot_schedule
from .plugins.emoji_system.emoji_manager import emoji_manager
from .plugins.person_info.person_info import person_info_manager
@@ -18,7 +20,7 @@ from .plugins.chat.bot import chat_bot
from .common.logger_manager import get_logger
from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality
-from .common.server import global_server
+from .common.server import global_server, Server
from rich.traceback import install
from .api.main import start_api_server
@@ -28,20 +30,15 @@ logger = get_logger("main")
class MainSystem:
- mood_manager: MoodManager
- hippocampus_manager: HippocampusManager
- individuality: Individuality
-
def __init__(self):
- self.mood_manager = MoodManager.get_instance()
- self.hippocampus_manager = HippocampusManager.get_instance()
- self.individuality = Individuality.get_instance()
+ self.hippocampus_manager: HippocampusManager = HippocampusManager.get_instance()
+ self.individuality: Individuality = Individuality.get_instance()
# 使用消息API替代直接的FastAPI实例
from .plugins.message import global_api
- self.app = global_api
- self.server = global_server
+ self.app: MessageServer = global_api
+ self.server: Server = global_server
async def initialize(self):
"""初始化系统组件"""
@@ -69,9 +66,10 @@ class MainSystem:
emoji_manager.initialize()
logger.success("表情包管理器初始化成功")
- # 启动情绪管理器
- self.mood_manager.start_mood_update(update_interval=global_config.mood_update_interval)
- logger.success("情绪管理器启动成功")
+ # 添加情绪衰减任务
+ await async_task_manager.add_task(MoodUpdateTask())
+ # 添加情绪打印任务
+ await async_task_manager.add_task(MoodPrintTask())
# 检查并清除person_info冗余字段,启动个人习惯推断
await person_info_manager.del_all_undefined_field()
@@ -136,7 +134,6 @@ class MainSystem:
self.build_memory_task(),
self.forget_memory_task(),
self.consolidate_memory_task(),
- self.print_mood_task(),
self.remove_recalled_message_task(),
emoji_manager.start_periodic_check_register(),
self.app.run(),
@@ -170,12 +167,6 @@ class MainSystem:
await HippocampusManager.get_instance().consolidate_memory()
print("\033[1;32m[记忆整合]\033[0m 记忆整合完成")
- async def print_mood_task(self):
- """打印情绪状态"""
- while True:
- self.mood_manager.print_mood_status()
- await asyncio.sleep(60)
-
@staticmethod
async def remove_recalled_message_task():
"""删除撤回消息任务"""
diff --git a/src/manager/async_task_manager.py b/src/manager/async_task_manager.py
index 6a3f1b813..720e918a9 100644
--- a/src/manager/async_task_manager.py
+++ b/src/manager/async_task_manager.py
@@ -87,7 +87,7 @@ class AsyncTaskManager:
if not issubclass(task.__class__, AsyncTask):
raise TypeError(f"task '{task.__class__.__name__}' 必须是继承 AsyncTask 的子类")
- with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
+ async with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
if task.task_name in self.tasks:
logger.warning(f"已存在名称为 '{task.task_name}' 的任务,正在尝试取消并替换")
self.tasks[task.task_name].cancel() # 取消已存在的任务
@@ -120,7 +120,7 @@ class AsyncTaskManager:
"""
终止所有任务并等待它们完成(该方法会阻塞其它尝试add_task()的操作)
"""
- with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
+ async with self._lock: # 由于可能需要await等待任务完成,所以需要加异步锁
# 设置中止标志
self.abort_flag.set()
# 取消所有任务
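
The switch to `async with` matters because asyncio.Lock only implements the asynchronous context-manager protocol; entering it with a plain `with` fails at runtime instead of acquiring the lock. Illustrative sketch:

    import asyncio

    async def demo():
        lock = asyncio.Lock()
        async with lock:   # correct: acquires the lock, releases it on exit
            pass
        # with lock:       # fails at runtime: asyncio.Lock has no __enter__, only __aenter__
        #     pass

    asyncio.run(demo())
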
diff --git a/src/manager/mood_manager.py b/src/manager/mood_manager.py
new file mode 100644
index 000000000..42677d4e1
--- /dev/null
+++ b/src/manager/mood_manager.py
@@ -0,0 +1,296 @@
+import asyncio
+import math
+import time
+from dataclasses import dataclass
+from typing import Dict, Tuple
+
+from ..config.config import global_config
+from ..common.logger_manager import get_logger
+from ..manager.async_task_manager import AsyncTask
+from ..individuality.individuality import Individuality
+
+logger = get_logger("mood")
+
+
+@dataclass
+class MoodState:
+ valence: float
+ """愉悦度 (-1.0 到 1.0),-1表示极度负面,1表示极度正面"""
+ arousal: float
+ """唤醒度 (-1.0 到 1.0),-1表示抑制,1表示兴奋"""
+ text: str
+ """心情的文本描述"""
+
+
+@dataclass
+class MoodChangeHistory:
+ valence_direction_factor: int
+ """愉悦度变化的系数(正为增益,负为抑制)"""
+ arousal_direction_factor: int
+ """唤醒度变化的系数(正为增益,负为抑制)"""
+
+
+class MoodUpdateTask(AsyncTask):
+ def __init__(self):
+ super().__init__(
+ task_name="Mood Update Task",
+ wait_before_start=global_config.mood_update_interval,
+ run_interval=global_config.mood_update_interval,
+ )
+
+ # 从配置文件获取衰减率
+ self.decay_rate_valence: float = 1 - global_config.mood_decay_rate
+ """愉悦度衰减率"""
+ self.decay_rate_arousal: float = 1 - global_config.mood_decay_rate
+ """唤醒度衰减率"""
+
+ self.last_update = time.time()
+ """上次更新时间"""
+
+ async def run(self):
+ current_time = time.time()
+ time_diff = current_time - self.last_update
+ agreeableness_factor = 1 # 宜人性系数
+ agreeableness_bias = 0 # 宜人性偏置
+ neuroticism_factor = 0.5 # 神经质系数
+ # 获取人格特质
+ personality = Individuality.get_instance().personality
+ if personality:
+ # 神经质:影响情绪变化速度
+ neuroticism_factor = 1 + (personality.neuroticism - 0.5) * 0.4
+ agreeableness_factor = 1 + (personality.agreeableness - 0.5) * 0.4
+
+ # 宜人性:影响情绪基准线
+ if personality.agreeableness < 0.2:
+ agreeableness_bias = (personality.agreeableness - 0.2) * 0.5
+ elif personality.agreeableness > 0.8:
+ agreeableness_bias = (personality.agreeableness - 0.8) * 0.5
+ else:
+ agreeableness_bias = 0
+
+ # 分别计算正向和负向的衰减率
+ if mood_manager.current_mood.valence >= 0:
+ # 正向情绪衰减
+ decay_rate_positive = self.decay_rate_valence * (1 / agreeableness_factor)
+ valence_target = 0 + agreeableness_bias
+ new_valence = valence_target + (mood_manager.current_mood.valence - valence_target) * math.exp(
+ -decay_rate_positive * time_diff * neuroticism_factor
+ )
+ else:
+ # 负向情绪衰减
+ decay_rate_negative = self.decay_rate_valence * agreeableness_factor
+ valence_target = 0 + agreeableness_bias
+ new_valence = valence_target + (mood_manager.current_mood.valence - valence_target) * math.exp(
+ -decay_rate_negative * time_diff * neuroticism_factor
+ )
+
+ # Arousal 向中性(0)回归
+ arousal_target = 0
+ new_arousal = arousal_target + (mood_manager.current_mood.arousal - arousal_target) * math.exp(
+ -self.decay_rate_arousal * time_diff * neuroticism_factor
+ )
+
+ mood_manager.set_current_mood(new_valence, new_arousal)
+
+ self.last_update = current_time
+
+
+class MoodPrintTask(AsyncTask):
+ def __init__(self):
+ super().__init__(
+ task_name="Mood Print Task",
+ wait_before_start=60,
+ run_interval=60,
+ )
+
+ async def run(self):
+ # 打印当前心情
+ logger.info(
+ f"愉悦度: {mood_manager.current_mood.valence:.2f}, "
+ f"唤醒度: {mood_manager.current_mood.arousal:.2f}, "
+ f"心情: {mood_manager.current_mood.text}"
+ )
+
+
+class MoodManager:
+ # TODO: 改进,使用具有实验支持的新情绪模型
+
+ EMOTION_FACTOR_MAP: Dict[str, Tuple[float, float]] = {
+ "开心": (0.21, 0.6),
+ "害羞": (0.15, 0.2),
+ "愤怒": (-0.24, 0.8),
+ "恐惧": (-0.21, 0.7),
+ "悲伤": (-0.21, 0.3),
+ "厌恶": (-0.12, 0.4),
+ "惊讶": (0.06, 0.7),
+ "困惑": (0.0, 0.6),
+ "平静": (0.03, 0.5),
+ }
+ """
+ 情绪词映射表 {mood: (valence, arousal)}
+ 将情绪描述词映射到愉悦度和唤醒度的元组
+ """
+
+ EMOTION_POINT_MAP: Dict[Tuple[float, float], str] = {
+ # 第一象限:高唤醒,正愉悦
+ (0.5, 0.4): "兴奋",
+ (0.3, 0.6): "快乐",
+ (0.2, 0.3): "满足",
+ # 第二象限:高唤醒,负愉悦
+ (-0.5, 0.4): "愤怒",
+ (-0.3, 0.6): "焦虑",
+ (-0.2, 0.3): "烦躁",
+ # 第三象限:低唤醒,负愉悦
+ (-0.5, -0.4): "悲伤",
+ (-0.3, -0.3): "疲倦",
+ (-0.4, -0.7): "疲倦",
+ # 第四象限:低唤醒,正愉悦
+ (0.2, -0.1): "平静",
+ (0.3, -0.2): "安宁",
+ (0.5, -0.4): "放松",
+ }
+ """
+ 情绪文本映射表 {(valence, arousal): mood}
+ 将量化的情绪状态元组映射到文本描述
+ """
+
+ def __init__(self):
+ self.current_mood = MoodState(
+ valence=0.0,
+ arousal=0.0,
+ text="平静",
+ )
+ """当前情绪状态"""
+
+ self.mood_change_history: MoodChangeHistory = MoodChangeHistory(
+ valence_direction_factor=0,
+ arousal_direction_factor=0,
+ )
+ """情绪变化历史"""
+
+ self._lock = asyncio.Lock()
+ """异步锁,用于保护线程安全"""
+
+ def set_current_mood(self, new_valence: float, new_arousal: float):
+ """
+ 设置当前情绪状态
+ :param new_valence: 新的愉悦度
+ :param new_arousal: 新的唤醒度
+ """
+ # 限制范围
+ self.current_mood.valence = max(-1.0, min(new_valence, 1.0))
+ self.current_mood.arousal = max(-1.0, min(new_arousal, 1.0))
+
+ closest_mood = None
+ min_distance = float("inf")
+
+ for (v, a), text in self.EMOTION_POINT_MAP.items():
+ # 计算当前情绪状态与每个情绪文本的欧氏距离
+ distance = math.sqrt((self.current_mood.valence - v) ** 2 + (self.current_mood.arousal - a) ** 2)
+ if distance < min_distance:
+ min_distance = distance
+ closest_mood = text
+
+ if closest_mood:
+ self.current_mood.text = closest_mood
+
+ def update_current_mood(self, valence_delta: float, arousal_delta: float):
+ """
+ 根据愉悦度和唤醒度变化量更新当前情绪状态
+ :param valence_delta: 愉悦度变化量
+ :param arousal_delta: 唤醒度变化量
+ """
+ # 计算连续增益/抑制
+ # 规则:多次相同方向的变化会有更大的影响系数,反方向的变化会清零影响系数(系数的正负号由变化方向决定)
+ if valence_delta * self.mood_change_history.valence_direction_factor > 0:
+ # 如果方向相同,则根据变化方向改变系数
+ if valence_delta > 0:
+ self.mood_change_history.valence_direction_factor += 1 # 若为正向,则增加
+ else:
+ self.mood_change_history.valence_direction_factor -= 1 # 若为负向,则减少
+ else:
+ # 如果方向不同,则重置计数
+ self.mood_change_history.valence_direction_factor = 0
+
+ if arousal_delta * self.mood_change_history.arousal_direction_factor > 0:
+ # 如果方向相同,则根据变化方向改变系数
+ if arousal_delta > 0:
+ self.mood_change_history.arousal_direction_factor += 1 # 若为正向,则增加计数
+ else:
+ self.mood_change_history.arousal_direction_factor -= 1 # 若为负向,则减少计数
+ else:
+ # 如果方向不同,则重置计数
+ self.mood_change_history.arousal_direction_factor = 0
+
+ # 计算增益/抑制的结果
+ # 规则:如果当前情绪状态与变化方向相同,则增益;否则抑制
+ if self.current_mood.valence * self.mood_change_history.valence_direction_factor > 0:
+ valence_delta = valence_delta * (1.01 ** abs(self.mood_change_history.valence_direction_factor))
+ else:
+ valence_delta = valence_delta * (0.99 ** abs(self.mood_change_history.valence_direction_factor))
+
+ if self.current_mood.arousal * self.mood_change_history.arousal_direction_factor > 0:
+ arousal_delta = arousal_delta * (1.01 ** abs(self.mood_change_history.arousal_direction_factor))
+ else:
+ arousal_delta = arousal_delta * (0.99 ** abs(self.mood_change_history.arousal_direction_factor))
+
+ self.set_current_mood(
+ new_valence=self.current_mood.valence + valence_delta,
+ new_arousal=self.current_mood.arousal + arousal_delta,
+ )
+
+ def get_mood_prompt(self) -> str:
+ """
+ 根据当前情绪状态生成提示词
+ """
+ base_prompt = f"当前心情:{self.current_mood.text}。"
+
+ # 根据情绪状态添加额外的提示信息
+ if self.current_mood.valence > 0.5:
+ base_prompt += "你现在心情很好,"
+ elif self.current_mood.valence < -0.5:
+ base_prompt += "你现在心情不太好,"
+
+ if self.current_mood.arousal > 0.4:
+ base_prompt += "情绪比较激动。"
+ elif self.current_mood.arousal < -0.4:
+ base_prompt += "情绪比较平静。"
+
+ return base_prompt
+
+ def get_arousal_multiplier(self) -> float:
+ """
+ 根据当前情绪状态返回唤醒度乘数
+ """
+ if self.current_mood.arousal > 0.4:
+ multiplier = 1 + min(0.15, (self.current_mood.arousal - 0.4) / 3)
+ return multiplier
+ elif self.current_mood.arousal < -0.4:
+ multiplier = 1 - min(0.15, ((0 - self.current_mood.arousal) - 0.4) / 3)
+ return multiplier
+ return 1.0
+
+ def update_mood_from_emotion(self, emotion: str, intensity: float = 1.0) -> None:
+ """
+ 根据情绪词更新心情状态
+ :param emotion: 情绪词(如'开心', '悲伤'等位于self.EMOTION_FACTOR_MAP中的键)
+ :param intensity: 情绪强度(0.0-1.0)
+ """
+ if emotion not in self.EMOTION_FACTOR_MAP:
+ logger.error(f"[情绪更新] 未知情绪词: {emotion}")
+ return
+
+ valence_change, arousal_change = self.EMOTION_FACTOR_MAP[emotion]
+ old_valence = self.current_mood.valence
+ old_arousal = self.current_mood.arousal
+ old_mood = self.current_mood.text
+
+ self.update_current_mood(valence_change, arousal_change) # 更新当前情绪状态
+
+ logger.info(
+ f"[情绪变化] {emotion}(强度:{intensity:.2f}) | 愉悦度:{old_valence:.2f}->{self.current_mood.valence:.2f}, 唤醒度:{old_arousal:.2f}->{self.current_mood.arousal:.2f} | 心情:{old_mood}->{self.current_mood.text}"
+ )
+
+
+mood_manager = MoodManager()
+"""全局情绪管理器"""
diff --git a/src/plugins/__init__.py b/src/plugins/__init__.py
index 2e057e6fe..631d9bbb7 100644
--- a/src/plugins/__init__.py
+++ b/src/plugins/__init__.py
@@ -6,7 +6,6 @@ MaiMBot插件系统
from .chat.chat_stream import chat_manager
from .emoji_system.emoji_manager import emoji_manager
from .person_info.relationship_manager import relationship_manager
-from .moods.moods import MoodManager
from .willing.willing_manager import willing_manager
from .schedule.schedule_generator import bot_schedule
@@ -15,7 +14,6 @@ __all__ = [
"chat_manager",
"emoji_manager",
"relationship_manager",
- "MoodManager",
"willing_manager",
"bot_schedule",
]
diff --git a/src/plugins/chat/bot.py b/src/plugins/chat/bot.py
index 9c4a33581..79e97c4f3 100644
--- a/src/plugins/chat/bot.py
+++ b/src/plugins/chat/bot.py
@@ -1,16 +1,15 @@
+import traceback
from typing import Dict, Any
-from ..moods.moods import MoodManager # 导入情绪管理器
-from ...config.config import global_config
-from .message import MessageRecv
-from ..PFC.pfc_manager import PFCManager
-from .chat_stream import chat_manager
-from .only_message_process import MessageProcessor
-
from src.common.logger_manager import get_logger
+from src.manager.mood_manager import mood_manager # 导入情绪管理器
+from .chat_stream import chat_manager
+from .message import MessageRecv
+from .only_message_process import MessageProcessor
+from ..PFC.pfc_manager import PFCManager
from ..heartFC_chat.heartflow_processor import HeartFCProcessor
from ..utils.prompt_builder import Prompt, global_prompt_manager
-import traceback
+from ...config.config import global_config
# 定义日志配置
@@ -23,7 +22,7 @@ class ChatBot:
def __init__(self):
self.bot = None # bot 实例引用
self._started = False
- self.mood_manager = MoodManager.get_instance() # 获取情绪管理器单例
+ self.mood_manager = mood_manager # 获取情绪管理器单例
self.heartflow_processor = HeartFCProcessor() # 新增
# 创建初始化PFC管理器的任务,会在_ensure_started时执行
diff --git a/src/plugins/chat/utils.py b/src/plugins/chat/utils.py
index 53e8f6f6e..c229f0a59 100644
--- a/src/plugins/chat/utils.py
+++ b/src/plugins/chat/utils.py
@@ -1,21 +1,20 @@
import random
-import time
import re
+import time
from collections import Counter
import jieba
import numpy as np
-from src.common.logger import get_module_logger
+from maim_message import UserInfo
from pymongo.errors import PyMongoError
+from src.common.logger import get_module_logger
+from src.manager.mood_manager import mood_manager
+from .message import MessageRecv
from ..models.utils_model import LLMRequest
from ..utils.typo_generator import ChineseTypoGenerator
-from ...config.config import global_config
-from .message import MessageRecv
-from maim_message import UserInfo
-from ..moods.moods import MoodManager
from ...common.database import db
-
+from ...config.config import global_config
logger = get_module_logger("chat_utils")
@@ -405,7 +404,6 @@ def calculate_typing_time(
- 在所有输入结束后,额外加上回车时间0.3秒
- 如果is_emoji为True,将使用固定1秒的输入时间
"""
- mood_manager = MoodManager.get_instance()
# 将0-1的唤醒度映射到-1到1
mood_arousal = mood_manager.current_mood.arousal
# 映射到0.5到2倍的速度系数
diff --git a/src/plugins/heartFC_chat/heartFC_chat.py b/src/plugins/heartFC_chat/heartFC_chat.py
index b594bf029..83abfbbed 100644
--- a/src/plugins/heartFC_chat/heartFC_chat.py
+++ b/src/plugins/heartFC_chat/heartFC_chat.py
@@ -1,33 +1,35 @@
import asyncio
+import contextlib
+import json # <--- 确保导入 json
+import random # <--- 添加导入
import time
import traceback
-import random # <--- 添加导入
-import json # <--- 确保导入 json
-from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
from collections import deque
+from typing import List, Optional, Dict, Any, Deque, Callable, Coroutine
+
+from rich.traceback import install
+
+from src.common.logger_manager import get_logger
+from src.config.config import global_config
+from src.heart_flow.observation import Observation
+from src.heart_flow.sub_mind import SubMind
+from src.heart_flow.utils_chat import get_chat_type_and_target_info
+from src.manager.mood_manager import mood_manager
+from src.plugins.chat.chat_stream import ChatStream
+from src.plugins.chat.chat_stream import chat_manager
from src.plugins.chat.message import MessageRecv, BaseMessageInfo, MessageThinking, MessageSending
from src.plugins.chat.message import Seg # Local import needed after move
-from src.plugins.chat.chat_stream import ChatStream
from src.plugins.chat.message import UserInfo
-from src.plugins.chat.chat_stream import chat_manager
-from src.common.logger_manager import get_logger
-from src.plugins.models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
-from src.plugins.utils.timer_calculator import Timer # <--- Import Timer
-from src.plugins.emoji_system.emoji_manager import emoji_manager
-from src.heart_flow.sub_mind import SubMind
-from src.heart_flow.observation import Observation
-from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager, prompt_builder
-import contextlib
-from src.plugins.utils.chat_message_builder import num_new_messages_since
-from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
-from .heartFC_sender import HeartFCSender
from src.plugins.chat.utils import process_llm_response
+from src.plugins.chat.utils_image import image_path_to_base64 # Local import needed after move
+from src.plugins.emoji_system.emoji_manager import emoji_manager
+from src.plugins.heartFC_chat.heartFC_Cycleinfo import CycleInfo
+from src.plugins.heartFC_chat.heartflow_prompt_builder import global_prompt_manager, prompt_builder
+from src.plugins.models.utils_model import LLMRequest
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
-from src.plugins.moods.moods import MoodManager
-from src.heart_flow.utils_chat import get_chat_type_and_target_info
-from rich.traceback import install
+from src.plugins.utils.chat_message_builder import num_new_messages_since
+from src.plugins.utils.timer_calculator import Timer # <--- Import Timer
+from .heartFC_sender import HeartFCSender
install(extra_lines=3)
@@ -1275,7 +1277,7 @@ class HeartFChatting:
"""
try:
# 1. 获取情绪影响因子并调整模型温度
- arousal_multiplier = MoodManager.get_instance().get_arousal_multiplier()
+ arousal_multiplier = mood_manager.get_arousal_multiplier()
current_temp = global_config.llm_normal["temp"] * arousal_multiplier
self.model_normal.temperature = current_temp # 动态调整温度
diff --git a/src/plugins/heartFC_chat/heartflow_prompt_builder.py b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
index c59168a7f..c4a137a78 100644
--- a/src/plugins/heartFC_chat/heartflow_prompt_builder.py
+++ b/src/plugins/heartFC_chat/heartflow_prompt_builder.py
@@ -10,7 +10,7 @@ import time
from typing import Union, Optional, Deque, Dict, Any
from ...common.database import db
from ..chat.utils import get_recent_group_speaker
-from ..moods.moods import MoodManager
+from src.manager.mood_manager import mood_manager
from ..memory_system.Hippocampus import HippocampusManager
from ..schedule.schedule_generator import bot_schedule
from ..knowledge.knowledge_lib import qa_manager
@@ -341,8 +341,7 @@ class PromptBuilder:
else:
logger.warning(f"Invalid person tuple encountered for relationship prompt: {person}")
- mood_manager = MoodManager.get_instance()
- mood_prompt = mood_manager.get_prompt()
+ mood_prompt = mood_manager.get_mood_prompt()
reply_styles1 = [
("然后给出日常且口语化的回复,平淡一些", 0.4),
("给出非常简短的回复", 0.4),
diff --git a/src/plugins/heartFC_chat/normal_chat.py b/src/plugins/heartFC_chat/normal_chat.py
index be05f4d70..460e881a0 100644
--- a/src/plugins/heartFC_chat/normal_chat.py
+++ b/src/plugins/heartFC_chat/normal_chat.py
@@ -1,26 +1,26 @@
-import time
import asyncio
-import traceback
import statistics # 导入 statistics 模块
+import time
+import traceback
from random import random
from typing import List, Optional # 导入 Optional
-from ..moods.moods import MoodManager
-from ...config.config import global_config
-from ..emoji_system.emoji_manager import emoji_manager
-from .normal_chat_generator import NormalChatGenerator
-from ..chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
-from ..chat.message_sender import message_manager
-from ..chat.utils_image import image_path_to_base64
-from ..willing.willing_manager import willing_manager
from maim_message import UserInfo, Seg
+
from src.common.logger_manager import get_logger
+from src.heart_flow.utils_chat import get_chat_type_and_target_info
+from src.manager.mood_manager import mood_manager
from src.plugins.chat.chat_stream import ChatStream, chat_manager
from src.plugins.person_info.relationship_manager import relationship_manager
from src.plugins.respon_info_catcher.info_catcher import info_catcher_manager
from src.plugins.utils.timer_calculator import Timer
-from src.heart_flow.utils_chat import get_chat_type_and_target_info
-
+from .normal_chat_generator import NormalChatGenerator
+from ..chat.message import MessageSending, MessageRecv, MessageThinking, MessageSet
+from ..chat.message_sender import message_manager
+from ..chat.utils_image import image_path_to_base64
+from ..emoji_system.emoji_manager import emoji_manager
+from ..willing.willing_manager import willing_manager
+from ...config.config import global_config
logger = get_logger("chat")
@@ -45,7 +45,7 @@ class NormalChat:
# Other sync initializations
self.gpt = NormalChatGenerator()
- self.mood_manager = MoodManager.get_instance()
+ self.mood_manager = mood_manager
self.start_time = time.time()
self.last_speak_time = 0
self._chat_task: Optional[asyncio.Task] = None
diff --git a/src/plugins/moods/moods.py b/src/plugins/moods/moods.py
deleted file mode 100644
index 1c025319f..000000000
--- a/src/plugins/moods/moods.py
+++ /dev/null
@@ -1,293 +0,0 @@
-import math
-import threading
-import time
-from dataclasses import dataclass
-
-from ...config.config import global_config
-from src.common.logger_manager import get_logger
-from ..person_info.relationship_manager import relationship_manager
-from src.individuality.individuality import Individuality
-
-
-logger = get_logger("mood")
-
-
-@dataclass
-class MoodState:
- valence: float # 愉悦度 (-1.0 到 1.0),-1表示极度负面,1表示极度正面
- arousal: float # 唤醒度 (-1.0 到 1.0),-1表示抑制,1表示兴奋
- text: str # 心情文本描述
-
-
-class MoodManager:
- _instance = None
- _lock = threading.Lock()
-
- def __new__(cls):
- with cls._lock:
- if cls._instance is None:
- cls._instance = super().__new__(cls)
- cls._instance._initialized = False
- return cls._instance
-
- def __init__(self):
- # 确保初始化代码只运行一次
- if self._initialized:
- return
-
- self._initialized = True
-
- # 初始化心情状态
- self.current_mood = MoodState(valence=0.0, arousal=0.0, text="平静")
-
- # 从配置文件获取衰减率
- self.decay_rate_valence = 1 - global_config.mood_decay_rate # 愉悦度衰减率
- self.decay_rate_arousal = 1 - global_config.mood_decay_rate # 唤醒度衰减率
-
- # 上次更新时间
- self.last_update = time.time()
-
- # 线程控制
- self._running = False
- self._update_thread = None
-
- # 情绪词映射表 (valence, arousal)
- self.emotion_map = {
- "开心": (0.21, 0.6),
- "害羞": (0.15, 0.2),
- "愤怒": (-0.24, 0.8),
- "恐惧": (-0.21, 0.7),
- "悲伤": (-0.21, 0.3),
- "厌恶": (-0.12, 0.4),
- "惊讶": (0.06, 0.7),
- "困惑": (0.0, 0.6),
- "平静": (0.03, 0.5),
- }
-
- # 情绪文本映射表
- self.mood_text_map = {
- # 第一象限:高唤醒,正愉悦
- (0.5, 0.4): "兴奋",
- (0.3, 0.6): "快乐",
- (0.2, 0.3): "满足",
- # 第二象限:高唤醒,负愉悦
- (-0.5, 0.4): "愤怒",
- (-0.3, 0.6): "焦虑",
- (-0.2, 0.3): "烦躁",
- # 第三象限:低唤醒,负愉悦
- (-0.5, -0.4): "悲伤",
- (-0.3, -0.3): "疲倦",
- (-0.4, -0.7): "疲倦",
- # 第四象限:低唤醒,正愉悦
- (0.2, -0.1): "平静",
- (0.3, -0.2): "安宁",
- (0.5, -0.4): "放松",
- }
-
- @classmethod
- def get_instance(cls) -> "MoodManager":
- """获取MoodManager的单例实例"""
- if cls._instance is None:
- cls._instance = MoodManager()
- return cls._instance
-
- def start_mood_update(self, update_interval: float = 5.0) -> None:
- """
- 启动情绪更新线程
- :param update_interval: 更新间隔(秒)
- """
- if self._running:
- return
-
- self._running = True
- self._update_thread = threading.Thread(
- target=self._continuous_mood_update, args=(update_interval,), daemon=True
- )
- self._update_thread.start()
-
- def stop_mood_update(self) -> None:
- """停止情绪更新线程"""
- self._running = False
- if self._update_thread and self._update_thread.is_alive():
- self._update_thread.join()
-
- def _continuous_mood_update(self, update_interval: float) -> None:
- """
- 持续更新情绪状态的线程函数
- :param update_interval: 更新间隔(秒)
- """
- while self._running:
- self._apply_decay()
- self._update_mood_text()
- time.sleep(update_interval)
-
- def _apply_decay(self) -> None:
- """应用情绪衰减,正向和负向情绪分开计算"""
- current_time = time.time()
- time_diff = current_time - self.last_update
- agreeableness_factor = 1
- agreeableness_bias = 0
- neuroticism_factor = 0.5
-
- # 获取人格特质
- personality = Individuality.get_instance().personality
- if personality:
- # 神经质:影响情绪变化速度
- neuroticism_factor = 1 + (personality.neuroticism - 0.5) * 0.4
- agreeableness_factor = 1 + (personality.agreeableness - 0.5) * 0.4
-
- # 宜人性:影响情绪基准线
- if personality.agreeableness < 0.2:
- agreeableness_bias = (personality.agreeableness - 0.2) * 0.5
- elif personality.agreeableness > 0.8:
- agreeableness_bias = (personality.agreeableness - 0.8) * 0.5
- else:
- agreeableness_bias = 0
-
- # 分别计算正向和负向的衰减率
- if self.current_mood.valence >= 0:
- # 正向情绪衰减
- decay_rate_positive = self.decay_rate_valence * (1 / agreeableness_factor)
- valence_target = 0 + agreeableness_bias
- self.current_mood.valence = valence_target + (self.current_mood.valence - valence_target) * math.exp(
- -decay_rate_positive * time_diff * neuroticism_factor
- )
- else:
- # 负向情绪衰减
- decay_rate_negative = self.decay_rate_valence * agreeableness_factor
- valence_target = 0 + agreeableness_bias
- self.current_mood.valence = valence_target + (self.current_mood.valence - valence_target) * math.exp(
- -decay_rate_negative * time_diff * neuroticism_factor
- )
-
- # Arousal 向中性(0)回归
- arousal_target = 0
- self.current_mood.arousal = arousal_target + (self.current_mood.arousal - arousal_target) * math.exp(
- -self.decay_rate_arousal * time_diff * neuroticism_factor
- )
-
- # 确保值在合理范围内
- self.current_mood.valence = max(-1.0, min(1.0, self.current_mood.valence))
- self.current_mood.arousal = max(-1.0, min(1.0, self.current_mood.arousal))
-
- self.last_update = current_time
-
- def update_mood_from_text(self, text: str, valence_change: float, arousal_change: float) -> None:
- """根据输入文本更新情绪状态"""
-
- self.current_mood.valence += valence_change
- self.current_mood.arousal += arousal_change
-
- # 限制范围
- self.current_mood.valence = max(-1.0, min(1.0, self.current_mood.valence))
- self.current_mood.arousal = max(-1.0, min(1.0, self.current_mood.arousal))
-
- self._update_mood_text()
-
- def set_mood_text(self, text: str) -> None:
- """直接设置心情文本"""
- self.current_mood.text = text
-
- def _update_mood_text(self) -> None:
- """根据当前情绪状态更新文本描述"""
- closest_mood = None
- min_distance = float("inf")
-
- for (v, a), text in self.mood_text_map.items():
- distance = math.sqrt((self.current_mood.valence - v) ** 2 + (self.current_mood.arousal - a) ** 2)
- if distance < min_distance:
- min_distance = distance
- closest_mood = text
-
- if closest_mood:
- self.current_mood.text = closest_mood
-
- def update_mood_by_user(self, user_id: str, valence_change: float, arousal_change: float) -> None:
- """根据用户ID更新情绪状态"""
-
- # 这里可以根据用户ID添加特定的权重或规则
- weight = 1.0 # 默认权重
-
- self.current_mood.valence += valence_change * weight
- self.current_mood.arousal += arousal_change * weight
-
- # 限制范围
- self.current_mood.valence = max(-1.0, min(1.0, self.current_mood.valence))
- self.current_mood.arousal = max(-1.0, min(1.0, self.current_mood.arousal))
-
- self._update_mood_text()
-
- def get_prompt(self) -> str:
- """根据当前情绪状态生成提示词"""
-
- base_prompt = f"当前心情:{self.current_mood.text}。"
-
- # 根据情绪状态添加额外的提示信息
- if self.current_mood.valence > 0.5:
- base_prompt += "你现在心情很好,"
- elif self.current_mood.valence < -0.5:
- base_prompt += "你现在心情不太好,"
-
- if self.current_mood.arousal > 0.4:
- base_prompt += "情绪比较激动。"
- elif self.current_mood.arousal < -0.4:
- base_prompt += "情绪比较平静。"
-
- return base_prompt
-
- def get_arousal_multiplier(self) -> float:
- """根据当前情绪状态返回唤醒度乘数"""
- if self.current_mood.arousal > 0.4:
- multiplier = 1 + min(0.15, (self.current_mood.arousal - 0.4) / 3)
- return multiplier
- elif self.current_mood.arousal < -0.4:
- multiplier = 1 - min(0.15, ((0 - self.current_mood.arousal) - 0.4) / 3)
- return multiplier
- return 1.0
-
- def get_current_mood(self) -> MoodState:
- """获取当前情绪状态"""
- return self.current_mood
-
- def print_mood_status(self) -> None:
- """打印当前情绪状态"""
- logger.info(
- f"愉悦度: {self.current_mood.valence:.2f}, "
- f"唤醒度: {self.current_mood.arousal:.2f}, "
- f"心情: {self.current_mood.text}"
- )
-
- def update_mood_from_emotion(self, emotion: str, intensity: float = 1.0) -> None:
- """
- 根据情绪词更新心情状态
- :param emotion: 情绪词(如'happy', 'sad'等)
- :param intensity: 情绪强度(0.0-1.0)
- """
- if emotion not in self.emotion_map:
- logger.debug(f"[情绪更新] 未知情绪词: {emotion}")
- return
-
- valence_change, arousal_change = self.emotion_map[emotion]
- old_valence = self.current_mood.valence
- old_arousal = self.current_mood.arousal
- old_mood = self.current_mood.text
-
- valence_change = relationship_manager.feedback_to_mood(valence_change)
-
- # 应用情绪强度
- valence_change *= intensity
- arousal_change *= intensity
-
- # 更新当前情绪状态
- self.current_mood.valence += valence_change
- self.current_mood.arousal += arousal_change
-
- # 限制范围
- self.current_mood.valence = max(-1.0, min(1.0, self.current_mood.valence))
- self.current_mood.arousal = max(-1.0, min(1.0, self.current_mood.arousal))
-
- self._update_mood_text()
-
- logger.info(
- f"[情绪变化] {emotion}(强度:{intensity:.2f}) | 愉悦度:{old_valence:.2f}->{self.current_mood.valence:.2f}, 唤醒度:{old_arousal:.2f}->{self.current_mood.arousal:.2f} | 心情:{old_mood}->{self.current_mood.text}"
- )
diff --git a/src/plugins/person_info/relationship_manager.py b/src/plugins/person_info/relationship_manager.py
index 862f23984..e9dad4b74 100644
--- a/src/plugins/person_info/relationship_manager.py
+++ b/src/plugins/person_info/relationship_manager.py
@@ -6,6 +6,9 @@ from .person_info import person_info_manager
import time
import random
from maim_message import UserInfo
+
+from ...manager.mood_manager import mood_manager
+
# import re
# import traceback
@@ -22,9 +25,7 @@ class RelationshipManager:
@property
def mood_manager(self):
if self._mood_manager is None:
- from ..moods.moods import MoodManager # 延迟导入
-
- self._mood_manager = MoodManager.get_instance()
+ self._mood_manager = mood_manager
return self._mood_manager
def positive_feedback_sys(self, label: str, stance: str):
@@ -60,9 +61,7 @@ class RelationshipManager:
def mood_feedback(self, value):
"""情绪反馈"""
mood_manager = self.mood_manager
- mood_gain = mood_manager.get_current_mood().valence ** 2 * math.copysign(
- 1, value * mood_manager.get_current_mood().valence
- )
+ mood_gain = mood_manager.current_mood.valence**2 * math.copysign(1, value * mood_manager.current_mood.valence)
value += value * mood_gain
logger.info(f"当前relationship增益系数:{mood_gain:.3f}")
return value
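
The hunks above replace the `MoodManager.get_instance()` singleton from the deleted `src/plugins/moods/moods.py` with a module-level `mood_manager` imported from `src.manager.mood_manager`. That new module is not part of this patch series, so the following is only a minimal sketch, assuming it keeps the `MoodState`/`MoodManager` shape of the deleted file and exposes just the surface these hunks use (`current_mood`, `get_arousal_multiplier`, `get_mood_prompt`):

from dataclasses import dataclass


@dataclass
class MoodState:
    valence: float  # pleasure axis, -1.0 .. 1.0
    arousal: float  # arousal axis, -1.0 .. 1.0
    text: str       # human-readable mood label


class MoodManager:
    def __init__(self) -> None:
        self.current_mood = MoodState(valence=0.0, arousal=0.0, text="平静")

    def get_arousal_multiplier(self) -> float:
        # Mirrors the deleted moods.py logic: nudge the LLM temperature up
        # when excited (arousal > 0.4) and down when calm (arousal < -0.4).
        a = self.current_mood.arousal
        if a > 0.4:
            return 1 + min(0.15, (a - 0.4) / 3)
        if a < -0.4:
            return 1 - min(0.15, (-a - 0.4) / 3)
        return 1.0

    def get_mood_prompt(self) -> str:
        # The prompt-builder hunk now calls get_mood_prompt() instead of the
        # old get_prompt(); only the mood text is sketched here.
        return f"当前心情:{self.current_mood.text}。"


# Module-level singleton shared by every importer, which is what
# `from src.manager.mood_manager import mood_manager` relies on.
mood_manager = MoodManager()

A module-level instance removes the need for the lazy `get_instance()` calls and delayed imports seen in the old code, at the cost of constructing the manager at import time.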
From 13db955454f40ed6f551cb1854ff535b68ec4c38 Mon Sep 17 00:00:00 2001
From: Oct-autumn
Date: Fri, 9 May 2025 02:37:52 +0800
Subject: [PATCH 18/20] fix: correct abnormal statistics time periods
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/plugins/utils/statistic.py | 98 ++++++++++++++++++++--------------
1 file changed, 59 insertions(+), 39 deletions(-)
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index a0de95ec9..66bdf279f 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -103,7 +103,7 @@ class StatisticOutputTask(AsyncTask):
def __init__(self, record_file_path: str = "llm_statistics.txt"):
# 延迟300秒启动,运行间隔300秒
- super().__init__(task_name="Statistics Data Output Task", wait_before_start=300, run_interval=300)
+ super().__init__(task_name="Statistics Data Output Task", wait_before_start=0, run_interval=300)
self.name_mapping: Dict[str, Tuple[str, float]] = {}
"""
@@ -117,25 +117,35 @@ class StatisticOutputTask(AsyncTask):
"""
now = datetime.now()
- self.stat_period: List[Tuple[str, datetime, str]] = [
- ("all_time", datetime(2000, 1, 1), "自部署以来的"),
- ("last_7_days", now - timedelta(days=7), "最近7天的"),
- ("last_24_hours", now - timedelta(days=1), "最近24小时的"),
- ("last_hour", now - timedelta(hours=1), "最近1小时的"),
+ if "deploy_time" in local_storage:
+ # 如果存在部署时间,则使用该时间作为全量统计的起始时间
+ deploy_time = datetime.fromtimestamp(local_storage["deploy_time"])
+ else:
+ # 否则,使用最大时间范围,并记录部署时间为当前时间
+ deploy_time = datetime(2000, 1, 1)
+ local_storage["deploy_time"] = now.timestamp()
+
+ self.stat_period: List[Tuple[str, timedelta, str]] = [
+ ("all_time", now - deploy_time, "自部署以来的"),
+ ("last_7_days", timedelta(days=7), "最近7天的"),
+ ("last_24_hours", timedelta(days=1), "最近24小时的"),
+ ("last_hour", timedelta(hours=1), "最近1小时的"),
]
"""
- 统计时间段
+ 统计时间段 [(统计名称, 统计时间段, 统计描述), ...]
"""
- def _statistic_console_output(self, stats: Dict[str, Any]):
+ def _statistic_console_output(self, stats: Dict[str, Any], now: datetime):
"""
输出统计数据到控制台
+ :param stats: 统计数据
+ :param now: 基准当前时间
"""
# 输出最近一小时的统计数据
output = [
self.SEP_LINE,
- f" 最近1小时的统计数据 (详细信息见文件:{self.record_file_path})",
+ f" 最近1小时的统计数据 (自{now.strftime('%Y-%m-%d %H:%M:%S')}开始,详细信息见文件:{self.record_file_path})",
self.SEP_LINE,
self._format_total_stat(stats["last_hour"]),
"",
@@ -148,11 +158,11 @@ class StatisticOutputTask(AsyncTask):
logger.info("\n" + "\n".join(output))
- def _statistic_file_output(self, stats: Dict[str, Any]):
+ def _statistic_file_output(self, stats: Dict[str, Any], now: datetime):
"""
输出统计数据到文件
"""
- output = [f"MaiBot运行统计报告 (生成时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')})", ""]
+ output = [f"MaiBot运行统计报告 (统计截止时间:{now.strftime('%Y-%m-%d %H:%M:%S')})", ""]
def _format_stat_data(title: str, stats_: Dict[str, Any]) -> str:
"""
@@ -172,16 +182,21 @@ class StatisticOutputTask(AsyncTask):
self._format_user_classified_stat(stats_),
"",
self._format_chat_stat(stats_),
- "",
]
)
- for period_key, period_start_time, period_desc in self.stat_period:
+ for period_key, period_interval, period_desc in self.stat_period:
if period_key in stats:
+ start_time = (
+ datetime.fromtimestamp(local_storage["deploy_time"])
+ if period_key == "all_time"
+ else now - period_interval
+ )
# 统计数据存在
output.append(
_format_stat_data(
- f"{period_desc}统计数据 (自{period_start_time.strftime('%Y-%m-%d %H:%M:%S')}开始)",
+ f"{period_desc}统计数据 "
+ f"(统计时段:{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {now.strftime('%Y-%m-%d %H:%M:%S')})",
stats[period_key],
)
)
@@ -191,20 +206,21 @@ class StatisticOutputTask(AsyncTask):
async def run(self):
try:
+ now = datetime.now()
# 收集统计数据
- stats = self._collect_all_statistics()
+ stats = self._collect_all_statistics(now)
# 输出统计数据到控制台
- self._statistic_console_output(stats)
+ self._statistic_console_output(stats, now)
# 输出统计数据到文件
- self._statistic_file_output(stats)
+ self._statistic_file_output(stats, now)
except Exception as e:
logger.exception(f"输出统计数据过程中发生异常,错误信息:{e}")
# -- 以下为统计数据收集方法 --
@staticmethod
- def _collect_model_request_for_period(collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
+ def _collect_model_request_for_period(collect_period: List[Tuple[str, datetime]]) -> Dict[str, Any]:
"""
收集指定时间段的LLM请求统计数据
@@ -243,17 +259,17 @@ class StatisticOutputTask(AsyncTask):
COST_BY_USER: defaultdict(float),
COST_BY_MODEL: defaultdict(float),
}
- for period_key, _, _ in collect_period
+ for period_key, _ in collect_period
}
# 以最早的时间戳为起始时间获取记录
for record in db.llm_usage.find({"timestamp": {"$gte": collect_period[-1][1]}}):
record_timestamp = record.get("timestamp")
- for idx, (_, period_start, _) in enumerate(collect_period):
+ for idx, (_, period_start) in enumerate(collect_period):
if record_timestamp >= period_start:
# 如果记录时间在当前时间段内,则它一定在更早的时间段内
# 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
- for period_key, _, _ in collect_period[idx:]:
+ for period_key, _ in collect_period[idx:]:
stats[period_key][TOTAL_REQ_CNT] += 1
request_type = record.get("request_type", "unknown") # 请求类型
@@ -290,7 +306,7 @@ class StatisticOutputTask(AsyncTask):
return stats
@staticmethod
- def _collect_online_time_for_period(collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
+ def _collect_online_time_for_period(collect_period: List[Tuple[str, datetime]], now: datetime) -> Dict[str, Any]:
"""
收集指定时间段的在线时间统计数据
@@ -307,17 +323,20 @@ class StatisticOutputTask(AsyncTask):
# 在线时间统计
ONLINE_TIME: 0.0,
}
- for period_key, _, _ in collect_period
+ for period_key, _ in collect_period
}
# 统计在线时间
for record in db.online_time.find({"end_timestamp": {"$gte": collect_period[-1][1]}}):
end_timestamp: datetime = record.get("end_timestamp")
- for idx, (_, period_start, _) in enumerate(collect_period):
+ for idx, (_, period_start) in enumerate(collect_period):
if end_timestamp >= period_start:
+ # 由于end_timestamp会超前标记时间,所以我们需要判断是否晚于当前时间,如果是,则使用当前时间作为结束时间
+ if end_timestamp > now:
+ end_timestamp = now
# 如果记录时间在当前时间段内,则它一定在更早的时间段内
# 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
- for period_key, _period_start, _ in collect_period[idx:]:
+ for period_key, _period_start in collect_period[idx:]:
start_timestamp: datetime = record.get("start_timestamp")
if start_timestamp < _period_start:
# 如果开始时间在查询边界之前,则使用开始时间
@@ -329,7 +348,7 @@ class StatisticOutputTask(AsyncTask):
return stats
- def _collect_message_count_for_period(self, collect_period: List[Tuple[str, datetime, str]]) -> Dict[str, Any]:
+ def _collect_message_count_for_period(self, collect_period: List[Tuple[str, datetime]]) -> Dict[str, Any]:
"""
收集指定时间段的消息统计数据
@@ -347,7 +366,7 @@ class StatisticOutputTask(AsyncTask):
TOTAL_MSG_CNT: 0,
MSG_CNT_BY_CHAT: defaultdict(int),
}
- for period_key, _, _ in collect_period
+ for period_key, _ in collect_period
}
# 统计消息量
@@ -375,42 +394,43 @@ class StatisticOutputTask(AsyncTask):
else:
self.name_mapping[chat_id] = (chat_name, message_time)
- for idx, (_, period_start, _) in enumerate(collect_period):
+ for idx, (_, period_start) in enumerate(collect_period):
if message_time >= period_start.timestamp():
# 如果记录时间在当前时间段内,则它一定在更早的时间段内
# 因此,我们可以直接跳过更早的时间段的判断,直接更新当前以及更早时间段的统计数据
- for period_key, _, _ in collect_period[idx:]:
+ for period_key, _ in collect_period[idx:]:
stats[period_key][TOTAL_MSG_CNT] += 1
stats[period_key][MSG_CNT_BY_CHAT][chat_id] += 1
break
return stats
- def _collect_all_statistics(self) -> Dict[str, Dict[str, Any]]:
+ def _collect_all_statistics(self, now: datetime) -> Dict[str, Dict[str, Any]]:
"""
收集各时间段的统计数据
+ :param now: 基准当前时间
"""
- now = datetime.now()
-
last_all_time_stat = None
- stat = {period[0]: {} for period in self.stat_period}
-
if "last_full_statistics_timestamp" in local_storage and "last_full_statistics" in local_storage:
# 若存有上次完整统计的时间戳,则使用该时间戳作为"所有时间"的起始时间,进行增量统计
last_full_stat_ts: float = local_storage["last_full_statistics_timestamp"]
last_all_time_stat = local_storage["last_full_statistics"]
self.stat_period = [item for item in self.stat_period if item[0] != "all_time"] # 删除"所有时间"的统计时段
- self.stat_period.append(("all_time", datetime.fromtimestamp(last_full_stat_ts), "自部署以来的"))
+ self.stat_period.append(("all_time", now - datetime.fromtimestamp(last_full_stat_ts), "自部署以来的"))
- model_req_stat = self._collect_model_request_for_period(self.stat_period)
- online_time_stat = self._collect_online_time_for_period(self.stat_period)
- message_count_stat = self._collect_message_count_for_period(self.stat_period)
+ stat_start_timestamp = [(period[0], now - period[1]) for period in self.stat_period]
+
+ stat = {item[0]: {} for item in self.stat_period}
+
+ model_req_stat = self._collect_model_request_for_period(stat_start_timestamp)
+ online_time_stat = self._collect_online_time_for_period(stat_start_timestamp, now)
+ message_count_stat = self._collect_message_count_for_period(stat_start_timestamp)
# 统计数据合并
# 合并三类统计数据
- for period_key, _, _ in self.stat_period:
+ for period_key, _ in stat_start_timestamp:
stat[period_key].update(model_req_stat[period_key])
stat[period_key].update(online_time_stat[period_key])
stat[period_key].update(message_count_stat[period_key])
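
Patch 18's core change is that statistics periods are now stored as relative `timedelta` intervals and resolved against a single `now` per run, with the deployment time persisted in `local_storage` so the "all_time" window has a stable start. A minimal standalone sketch of that idea (a plain dict stands in for the project's `local_storage`):

from datetime import datetime, timedelta

# Stand-in for the project's persistent local_storage; a plain dict is used
# purely for illustration.
local_storage: dict = {}

now = datetime.now()

# First run: remember when the bot was deployed.
if "deploy_time" not in local_storage:
    local_storage["deploy_time"] = now.timestamp()

deploy_time = datetime.fromtimestamp(local_storage["deploy_time"])

# Periods are stored as relative intervals and resolved against one fixed `now`,
# so every collector sees the same boundaries.
stat_period = [
    ("all_time", now - deploy_time, "自部署以来的"),
    ("last_7_days", timedelta(days=7), "最近7天的"),
    ("last_hour", timedelta(hours=1), "最近1小时的"),
]
stat_start_timestamp = [(key, now - interval) for key, interval, _ in stat_period]

for key, start in stat_start_timestamp:
    print(f"{key}: {start:%Y-%m-%d %H:%M:%S} ~ {now:%Y-%m-%d %H:%M:%S}")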
From 6056ba47cc828e1b0ac6210b9793598ba629dbfb Mon Sep 17 00:00:00 2001
From: Oct-autumn
Date: Fri, 9 May 2025 15:36:07 +0800
Subject: [PATCH 19/20] feat: generate the statistics report in HTML format
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.gitignore | 2 +-
src/plugins/utils/statistic.py | 427 ++++++++++++++++++++++++---------
2 files changed, 312 insertions(+), 117 deletions(-)
diff --git a/.gitignore b/.gitignore
index 9e1b96811..e8a931078 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,7 +43,7 @@ src/plugins/utils/statistic.py
__pycache__/
*.py[cod]
*$py.class
-llm_statistics.txt
+maibot_statistics.html
mongodb
napcat
run_dev.bat
diff --git a/src/plugins/utils/statistic.py b/src/plugins/utils/statistic.py
index 66bdf279f..4c11ba3d8 100644
--- a/src/plugins/utils/statistic.py
+++ b/src/plugins/utils/statistic.py
@@ -96,12 +96,37 @@ class OnlineTimeRecordTask(AsyncTask):
logger.exception("在线时间记录失败")
+def _format_online_time(online_seconds: int) -> str:
+    """
+    格式化在线时间
+    :param online_seconds: 在线时间(秒)
+    :return: 格式化后的在线时间字符串
+    """
+    total_online_time = timedelta(seconds=online_seconds)
+
+    days = total_online_time.days
+    hours = total_online_time.seconds // 3600
+    minutes = (total_online_time.seconds // 60) % 60
+    seconds = total_online_time.seconds % 60
+    if days > 0:
+        # 在线时间超过1天:格式化为"X天X小时X分钟X秒"
+        total_online_time_str = f"{days}天{hours}小时{minutes}分钟{seconds}秒"
+    elif hours > 0:
+        # 在线时间超过1小时:格式化为"X小时X分钟X秒"
+        total_online_time_str = f"{hours}小时{minutes}分钟{seconds}秒"
+    else:
+        # 其他情况:格式化为"X分钟X秒"
+        total_online_time_str = f"{minutes}分钟{seconds}秒"
+
+    return total_online_time_str
+
+
class StatisticOutputTask(AsyncTask):
"""统计输出任务"""
SEP_LINE = "-" * 84
- def __init__(self, record_file_path: str = "llm_statistics.txt"):
+ def __init__(self, record_file_path: str = "maibot_statistics.html"):
# 延迟300秒启动,运行间隔300秒
super().__init__(task_name="Statistics Data Output Task", wait_before_start=0, run_interval=300)
@@ -126,10 +151,10 @@ class StatisticOutputTask(AsyncTask):
local_storage["deploy_time"] = now.timestamp()
self.stat_period: List[Tuple[str, timedelta, str]] = [
- ("all_time", now - deploy_time, "自部署以来的"),
- ("last_7_days", timedelta(days=7), "最近7天的"),
- ("last_24_hours", timedelta(days=1), "最近24小时的"),
- ("last_hour", timedelta(hours=1), "最近1小时的"),
+ ("all_time", now - deploy_time, "自部署以来"), # 必须保留“all_time”
+ ("last_7_days", timedelta(days=7), "最近7天"),
+ ("last_24_hours", timedelta(days=1), "最近24小时"),
+ ("last_hour", timedelta(hours=1), "最近1小时"),
]
"""
统计时间段 [(统计名称, 统计时间段, 统计描述), ...]
@@ -158,52 +183,6 @@ class StatisticOutputTask(AsyncTask):
logger.info("\n" + "\n".join(output))
- def _statistic_file_output(self, stats: Dict[str, Any], now: datetime):
- """
- 输出统计数据到文件
- """
- output = [f"MaiBot运行统计报告 (统计截止时间:{now.strftime('%Y-%m-%d %H:%M:%S')})", ""]
-
- def _format_stat_data(title: str, stats_: Dict[str, Any]) -> str:
- """
- 格式化统计数据
- """
- return "\n".join(
- [
- self.SEP_LINE,
- f" {title}",
- self.SEP_LINE,
- self._format_total_stat(stats_),
- "",
- self._format_model_classified_stat(stats_),
- "",
- self._format_req_type_classified_stat(stats_),
- "",
- self._format_user_classified_stat(stats_),
- "",
- self._format_chat_stat(stats_),
- ]
- )
-
- for period_key, period_interval, period_desc in self.stat_period:
- if period_key in stats:
- start_time = (
- datetime.fromtimestamp(local_storage["deploy_time"])
- if period_key == "all_time"
- else now - period_interval
- )
- # 统计数据存在
- output.append(
- _format_stat_data(
- f"{period_desc}统计数据 "
- f"(统计时段:{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {now.strftime('%Y-%m-%d %H:%M:%S')})",
- stats[period_key],
- )
- )
-
- with open(self.record_file_path, "w", encoding="utf-8") as f:
- f.write("\n\n".join(output))
-
async def run(self):
try:
now = datetime.now()
@@ -212,8 +191,8 @@ class StatisticOutputTask(AsyncTask):
# 输出统计数据到控制台
self._statistic_console_output(stats, now)
- # 输出统计数据到文件
- self._statistic_file_output(stats, now)
+ # 输出统计数据到html文件
+ self._generate_html_report(stats, now)
except Exception as e:
logger.exception(f"输出统计数据过程中发生异常,错误信息:{e}")
@@ -340,10 +319,10 @@ class StatisticOutputTask(AsyncTask):
start_timestamp: datetime = record.get("start_timestamp")
if start_timestamp < _period_start:
# 如果开始时间在查询边界之前,则使用开始时间
- stats[period_key][ONLINE_TIME] += (end_timestamp - _period_start).total_seconds() / 60
+ stats[period_key][ONLINE_TIME] += (end_timestamp - _period_start).total_seconds()
else:
# 否则,使用开始时间
- stats[period_key][ONLINE_TIME] += (end_timestamp - start_timestamp).total_seconds() / 60
+ stats[period_key][ONLINE_TIME] += (end_timestamp - start_timestamp).total_seconds()
break # 取消更早时间段的判断
return stats
@@ -460,8 +439,9 @@ class StatisticOutputTask(AsyncTask):
"""
格式化总统计数据
"""
+
output = [
- f"总在线时间: {stats[ONLINE_TIME]:.1f}分钟",
+ f"总在线时间: {_format_online_time(stats[ONLINE_TIME])}",
f"总消息数: {stats[TOTAL_MSG_CNT]}",
f"总请求数: {stats[TOTAL_REQ_CNT]}",
f"总花费: {stats[TOTAL_COST]:.4f}¥",
@@ -495,66 +475,6 @@ class StatisticOutputTask(AsyncTask):
else:
return ""
- @staticmethod
- def _format_req_type_classified_stat(stats: Dict[str, Any]) -> str:
- """
- 格式化按请求类型分类的统计数据
- """
- if stats[TOTAL_REQ_CNT] > 0:
- # 按请求类型统计
- data_fmt = "{:<32} {:>10} {:>12} {:>12} {:>12} {:>9.4f}¥"
-
- output = [
- "按请求类型分类统计:",
- " 请求类型 调用次数 输入Token 输出Token Token总量 累计花费",
- ]
- for req_type, count in sorted(stats[REQ_CNT_BY_TYPE].items()):
- name = req_type[:29] + "..." if len(req_type) > 32 else req_type
- in_tokens = stats[IN_TOK_BY_TYPE][req_type]
- out_tokens = stats[OUT_TOK_BY_TYPE][req_type]
- tokens = stats[TOTAL_TOK_BY_TYPE][req_type]
- cost = stats[COST_BY_TYPE][req_type]
- output.append(data_fmt.format(name, count, in_tokens, out_tokens, tokens, cost))
-
- output.append("")
- return "\n".join(output)
- else:
- return ""
-
- @staticmethod
- def _format_user_classified_stat(stats: Dict[str, Any]) -> str:
- """
- 格式化按用户分类的统计数据
- """
- if stats[TOTAL_REQ_CNT] > 0:
- # 修正用户统计列宽
- data_fmt = "{:<32} {:>10} {:>12} {:>12} {:>12} {:>9.4f}¥"
-
- output = [
- "按用户分类统计:",
- " 用户名称 调用次数 输入Token 输出Token Token总量 累计花费",
- ]
- for user_id, count in sorted(stats[REQ_CNT_BY_USER].items()):
- in_tokens = stats[IN_TOK_BY_USER][user_id]
- out_tokens = stats[OUT_TOK_BY_USER][user_id]
- tokens = stats[TOTAL_TOK_BY_USER][user_id]
- cost = stats[COST_BY_USER][user_id]
- output.append(
- data_fmt.format(
- user_id[:22], # 不再添加省略号,保持原始ID
- count,
- in_tokens,
- out_tokens,
- tokens,
- cost,
- )
- )
-
- output.append("")
- return "\n".join(output)
- else:
- return ""
-
def _format_chat_stat(self, stats: Dict[str, Any]) -> str:
"""
格式化聊天统计数据
@@ -568,3 +488,278 @@ class StatisticOutputTask(AsyncTask):
return "\n".join(output)
else:
return ""
+
+ def _generate_html_report(self, stat: dict[str, Any], now: datetime):
+ """
+ 生成HTML格式的统计报告
+ :param stat: 统计数据
+ :param now: 基准当前时间
+ :return: HTML格式的统计报告
+ """
+
+        tab_list = [
+            f'<button class="tab-button" onclick="showTab(\'{period[0]}\')">{period[2]}</button>'
+            for period in self.stat_period
+        ]
+
+        def _format_stat_data(stat_data: dict[str, Any], div_id: str, start_time: datetime) -> str:
+            """
+            格式化一个时间段的统计数据到html div块
+            :param stat_data: 统计数据
+            :param div_id: div的ID
+            :param start_time: 统计时间段开始时间
+            """
+            # 预先拼接各表格的行
+            model_rows = "\n".join(
+                f"<tr><td>{model_name}</td>"
+                f"<td>{count}</td>"
+                f"<td>{stat_data[IN_TOK_BY_MODEL][model_name]}</td>"
+                f"<td>{stat_data[OUT_TOK_BY_MODEL][model_name]}</td>"
+                f"<td>{stat_data[TOTAL_TOK_BY_MODEL][model_name]}</td>"
+                f"<td>{stat_data[COST_BY_MODEL][model_name]:.4f} ¥</td></tr>"
+                for model_name, count in sorted(stat_data[REQ_CNT_BY_MODEL].items())
+            )
+            req_type_rows = "\n".join(
+                f"<tr><td>{req_type}</td>"
+                f"<td>{count}</td>"
+                f"<td>{stat_data[IN_TOK_BY_TYPE][req_type]}</td>"
+                f"<td>{stat_data[OUT_TOK_BY_TYPE][req_type]}</td>"
+                f"<td>{stat_data[TOTAL_TOK_BY_TYPE][req_type]}</td>"
+                f"<td>{stat_data[COST_BY_TYPE][req_type]:.4f} ¥</td></tr>"
+                for req_type, count in sorted(stat_data[REQ_CNT_BY_TYPE].items())
+            )
+            user_rows = "\n".join(
+                f"<tr><td>{user_id}</td>"
+                f"<td>{count}</td>"
+                f"<td>{stat_data[IN_TOK_BY_USER][user_id]}</td>"
+                f"<td>{stat_data[OUT_TOK_BY_USER][user_id]}</td>"
+                f"<td>{stat_data[TOTAL_TOK_BY_USER][user_id]}</td>"
+                f"<td>{stat_data[COST_BY_USER][user_id]:.4f} ¥</td></tr>"
+                for user_id, count in sorted(stat_data[REQ_CNT_BY_USER].items())
+            )
+            chat_rows = "\n".join(
+                f"<tr><td>{self.name_mapping[chat_id][0]}</td><td>{count}</td></tr>"
+                for chat_id, count in sorted(stat_data[MSG_CNT_BY_CHAT].items())
+            )
+
+            # 生成HTML
+            return f"""
+            <div id="{div_id}" class="tab-content">
+                <p>
+                    统计时段:
+                    {start_time.strftime("%Y-%m-%d %H:%M:%S")} ~ {now.strftime("%Y-%m-%d %H:%M:%S")}
+                </p>
+                <p>总在线时间: {_format_online_time(stat_data[ONLINE_TIME])}</p>
+                <p>总消息数: {stat_data[TOTAL_MSG_CNT]}</p>
+                <p>总请求数: {stat_data[TOTAL_REQ_CNT]}</p>
+                <p>总花费: {stat_data[TOTAL_COST]:.4f} ¥</p>
+
+                <h2>按模型分类统计</h2>
+                <table>
+                    <tr><th>模型名称</th><th>调用次数</th><th>输入Token</th><th>输出Token</th><th>Token总量</th><th>累计花费</th></tr>
+                    {model_rows}
+                </table>
+
+                <h2>按请求类型分类统计</h2>
+                <table>
+                    <tr><th>请求类型</th><th>调用次数</th><th>输入Token</th><th>输出Token</th><th>Token总量</th><th>累计花费</th></tr>
+                    {req_type_rows}
+                </table>
+
+                <h2>按用户分类统计</h2>
+                <table>
+                    <tr><th>用户名称</th><th>调用次数</th><th>输入Token</th><th>输出Token</th><th>Token总量</th><th>累计花费</th></tr>
+                    {user_rows}
+                </table>
+
+                <h2>聊天消息统计</h2>
+                <table>
+                    <tr><th>联系人/群组名称</th><th>消息数量</th></tr>
+                    {chat_rows}
+                </table>
+            </div>
+            """
+
+        tab_content_list = [
+            _format_stat_data(stat[period[0]], period[0], now - period[1])
+            for period in self.stat_period
+            if period[0] != "all_time"
+        ]
+
+        tab_content_list.append(
+            _format_stat_data(stat["all_time"], "all_time", datetime.fromtimestamp(local_storage["deploy_time"]))
+        )
+
+        tabs_html = "\n".join(tab_list)
+        contents_html = "\n".join(tab_content_list)
+
+        html_template = (
+            """<!DOCTYPE html>
+<html lang="zh-CN">
+<head>
+    <meta charset="UTF-8">
+    <title>MaiBot运行统计报告</title>
+    <style>
+        body { font-family: sans-serif; margin: 20px; }
+        table { border-collapse: collapse; margin-bottom: 16px; }
+        th, td { border: 1px solid #ccc; padding: 4px 8px; }
+        .tab-content { display: none; }
+        .tab-content.active { display: block; }
+    </style>
+</head>
+<body>
+"""
+            + f"""
+    <h1>MaiBot运行统计报告</h1>
+    <p>统计截止时间: {now.strftime("%Y-%m-%d %H:%M:%S")}</p>
+    <div class="tab-bar">
+        {tabs_html}
+    </div>
+    <div class="tab-container">
+        {contents_html}
+    </div>
+"""
+            + """
+    <script>
+        function showTab(tabId) {
+            document.querySelectorAll(".tab-content").forEach(function (div) {
+                div.classList.remove("active");
+            });
+            document.getElementById(tabId).classList.add("active");
+        }
+        // 默认展开第一个时间段
+        var first = document.querySelector(".tab-content");
+        if (first) { first.classList.add("active"); }
+    </script>
+</body>
+</html>
+"""
+        )
+
+ with open(self.record_file_path, "w", encoding="utf-8") as f:
+ f.write(html_template)
From f473a888c4eca7c7a0d13b2f40df2220023e7fe7 Mon Sep 17 00:00:00 2001
From: Oct-autumn
Date: Mon, 12 May 2025 01:06:39 +0800
Subject: [PATCH 20/20] feat: refactor the telemetry module
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.gitignore | 1 -
src/main.py | 7 +-
src/plugins/remote/__init__.py | 4 -
src/plugins/remote/remote.py | 326 +++++++++++----------------------
4 files changed, 115 insertions(+), 223 deletions(-)
delete mode 100644 src/plugins/remote/__init__.py
diff --git a/.gitignore b/.gitignore
index e8a931078..a8c972ab4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,7 +35,6 @@ config/bot_config.toml
config/bot_config.toml.bak
config/lpmm_config.toml
config/lpmm_config.toml.bak
-src/plugins/remote/client_uuid.json
(测试版)麦麦生成人格.bat
(临时版)麦麦开始学习.bat
src/plugins/utils/statistic.py
diff --git a/src/main.py b/src/main.py
index fbb40e3c5..09570a4f2 100644
--- a/src/main.py
+++ b/src/main.py
@@ -3,9 +3,10 @@ import time
from maim_message import MessageServer
+from .plugins.remote.remote import TelemetryHeartBeatTask
from .manager.async_task_manager import async_task_manager
from .plugins.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
-from src.manager.mood_manager import logger, MoodPrintTask, MoodUpdateTask
+from .manager.mood_manager import MoodPrintTask, MoodUpdateTask
from .plugins.schedule.schedule_generator import bot_schedule
from .plugins.emoji_system.emoji_manager import emoji_manager
from .plugins.person_info.person_info import person_info_manager
@@ -18,7 +19,6 @@ from .plugins.storage.storage import MessageStorage
from .config.config import global_config
from .plugins.chat.bot import chat_bot
from .common.logger_manager import get_logger
-from .plugins.remote import heartbeat_thread # noqa: F401
from .individuality.individuality import Individuality
from .common.server import global_server, Server
from rich.traceback import install
@@ -59,6 +59,9 @@ class MainSystem:
# 添加统计信息输出任务
await async_task_manager.add_task(StatisticOutputTask())
+ # 添加遥测心跳任务
+ await async_task_manager.add_task(TelemetryHeartBeatTask())
+
# 启动API服务器
start_api_server()
logger.success("API服务器启动成功")
diff --git a/src/plugins/remote/__init__.py b/src/plugins/remote/__init__.py
deleted file mode 100644
index 4cbce96d1..000000000
--- a/src/plugins/remote/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .remote import main
-
-# 启动心跳线程
-heartbeat_thread = main()
diff --git a/src/plugins/remote/remote.py b/src/plugins/remote/remote.py
index 68b023969..1d26df01b 100644
--- a/src/plugins/remote/remote.py
+++ b/src/plugins/remote/remote.py
@@ -1,248 +1,142 @@
+import asyncio
+
import requests
-import time
-import uuid
import platform
-import os
-import json
-import threading
-import subprocess
# from loguru import logger
from src.common.logger_manager import get_logger
from src.config.config import global_config
+from src.manager.async_task_manager import AsyncTask
+from src.manager.local_store_manager import local_storage
logger = get_logger("remote")
-# --- 使用向上导航的方式定义路径 ---
-
-# 1. 获取当前文件 (remote.py) 所在的目录
-current_dir = os.path.dirname(os.path.abspath(__file__))
-
-# 2. 从当前目录向上导航三级找到项目根目录
-# (src/plugins/remote/ -> src/plugins/ -> src/ -> project_root)
-root_dir = os.path.abspath(os.path.join(current_dir, "..", "..", ".."))
-
-# 3. 定义 data 目录的路径 (位于项目根目录下)
-data_dir = os.path.join(root_dir, "data")
-
-# 4. 定义 UUID 文件在 data 目录下的完整路径
-UUID_FILE = os.path.join(data_dir, "client_uuid.json")
-
-# --- 路径定义结束 ---
+TELEMETRY_SERVER_URL = "http://localhost:8080"
+"""遥测服务地址"""
-# 生成或获取客户端唯一ID
-def get_unique_id():
- # --- 在尝试读写 UUID_FILE 之前确保 data 目录存在 ---
- # 将目录检查和创建逻辑移到这里,在首次需要写入前执行
- try:
- # exist_ok=True 意味着如果目录已存在也不会报错
- os.makedirs(data_dir, exist_ok=True)
- except OSError as e:
- # 处理可能的权限错误等
- logger.error(f"无法创建数据目录 {data_dir}: {e}")
- # 根据你的错误处理逻辑,可能需要在这里返回错误或抛出异常
- # 暂且返回 None 或抛出,避免继续执行导致问题
- raise RuntimeError(f"无法创建必要的数据目录 {data_dir}") from e
- # --- 目录检查结束 ---
+class TelemetryHeartBeatTask(AsyncTask):
+ HEARTBEAT_INTERVAL = 300
- # 检查是否已经有保存的UUID
- if os.path.exists(UUID_FILE):
- try:
- with open(UUID_FILE, "r", encoding="utf-8") as f: # 指定 encoding
- data = json.load(f)
- if "client_id" in data:
- logger.debug(f"从本地文件读取客户端ID: {UUID_FILE}")
- return data["client_id"]
- except (json.JSONDecodeError, IOError) as e:
- logger.warning(f"读取UUID文件 {UUID_FILE} 出错: {e},将生成新的UUID")
- except Exception as e: # 捕捉其他可能的异常
- logger.error(f"读取UUID文件 {UUID_FILE} 时发生未知错误: {e}")
+ def __init__(self):
+ super().__init__(task_name="Telemetry Heart Beat Task", run_interval=self.HEARTBEAT_INTERVAL)
+ self.server_url = TELEMETRY_SERVER_URL
+ """遥测服务地址"""
- # 如果没有保存的UUID或读取出错,则生成新的
- client_id = generate_unique_id()
- logger.info(f"生成新的客户端ID: {client_id}")
+ self.client_uuid = local_storage["mmc_uuid"] if "mmc_uuid" in local_storage else None
+ """客户端UUID"""
- # 保存UUID到文件
- try:
- # 再次确认目录存在 (虽然理论上前面已创建,但更保险)
- os.makedirs(data_dir, exist_ok=True)
- with open(UUID_FILE, "w", encoding="utf-8") as f: # 指定 encoding
- json.dump({"client_id": client_id}, f, indent=4) # 添加 indent 使json可读
- logger.info(f"已保存新生成的客户端ID到本地文件: {UUID_FILE}")
- except IOError as e:
- logger.error(f"保存UUID时出错: {UUID_FILE} - {e}")
- except Exception as e: # 捕捉其他可能的异常
- logger.error(f"保存UUID文件 {UUID_FILE} 时发生未知错误: {e}")
+ self.info_dict = self._get_sys_info()
+ """系统信息字典"""
- return client_id
+ @staticmethod
+ def _get_sys_info() -> dict[str, str]:
+ """获取系统信息"""
+ info_dict = {
+ "os_type": "Unknown",
+ "py_version": platform.python_version(),
+ "mmc_version": global_config.MAI_VERSION,
+ }
+ match platform.system():
+ case "Windows":
+ info_dict["os_type"] = "Windows"
+ case "Linux":
+ info_dict["os_type"] = "Linux"
+ case "Darwin":
+ info_dict["os_type"] = "macOS"
+ case _:
+ info_dict["os_type"] = "Unknown"
-# 生成客户端唯一ID
-def generate_unique_id():
- # 基于机器码生成唯一ID,同一台机器上生成的UUID是固定的,只要机器码不变
- import hashlib
+ return info_dict
- system_info = platform.system()
- machine_code = None
+ async def _req_uuid(self) -> bool:
+ """
+ 向服务端请求UUID(不应在已存在UUID的情况下调用,会覆盖原有的UUID)
+ """
- try:
- if system_info == "Windows":
- # 使用wmic命令获取主机UUID(更稳定)
- result = subprocess.check_output(
- "wmic csproduct get uuid", shell=True, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL
- )
- lines = result.decode(errors="ignore").splitlines()
- # 过滤掉空行和表头,只取有效UUID
- uuids = [line.strip() for line in lines if line.strip() and line.strip().lower() != "uuid"]
- if uuids:
- uuid_val = uuids[0]
- # logger.debug(f"主机UUID: {uuid_val}")
- # 增加无效值判断
- if uuid_val and uuid_val.lower() not in ["to be filled by o.e.m.", "none", "", "standard"]:
- machine_code = uuid_val
- elif system_info == "Linux":
- # 优先读取 /etc/machine-id,其次 /var/lib/dbus/machine-id,取第一个非空且内容有效的
- for path in ["/etc/machine-id", "/var/lib/dbus/machine-id"]:
- if os.path.exists(path):
- with open(path, "r") as f:
- code = f.read().strip()
- # 只要内容非空且不是全0
- if code and set(code) != {"0"}:
- machine_code = code
- break
- elif system_info == "Darwin":
- # macOS: 使用IOPlatformUUID
- result = subprocess.check_output(
- "ioreg -rd1 -c IOPlatformExpertDevice | awk '/IOPlatformUUID/'", shell=True
- )
- uuid_line = result.decode(errors="ignore")
- # 解析出 "IOPlatformUUID" = "xxxx-xxxx-xxxx-xxxx"
- import re
-
- m = re.search(r'"IOPlatformUUID"\s*=\s*"([^"]+)"', uuid_line)
- if m:
- uuid_val = m.group(1)
- logger.debug(f"IOPlatformUUID: {uuid_val}")
- if uuid_val and uuid_val.lower() not in ["to be filled by o.e.m.", "none", "", "standard"]:
- machine_code = uuid_val
- except Exception as e:
- logger.debug(f"获取机器码失败: {e}")
-
- # 如果主板序列号无效,尝试用MAC地址
- if not machine_code:
- try:
- mac = uuid.getnode()
- if (mac >> 40) % 2 == 0: # 不是本地伪造MAC
- machine_code = str(mac)
- except Exception as e:
- logger.debug(f"获取MAC地址失败: {e}")
-
- def md5_to_uuid(md5hex):
- # 将32位md5字符串格式化为8-4-4-4-12的UUID格式
- return f"{md5hex[0:8]}-{md5hex[8:12]}-{md5hex[12:16]}-{md5hex[16:20]}-{md5hex[20:32]}"
-
- if machine_code:
- # print(f"machine_code={machine_code!r}") # 可用于调试
- md5 = hashlib.md5(machine_code.encode("utf-8")).hexdigest()
- uuid_str = md5_to_uuid(md5)
- else:
- uuid_str = str(uuid.uuid4())
-
- unique_id = f"{system_info}-{uuid_str}"
- return unique_id
-
-
-def send_heartbeat(server_url, client_id):
- """向服务器发送心跳"""
- sys = platform.system()
- try:
- headers = {"Client-ID": client_id, "User-Agent": f"HeartbeatClient/{client_id[:8]}"}
- data = json.dumps(
- {"system": sys, "Version": global_config.MAI_VERSION},
- )
- logger.debug(f"正在发送心跳到服务器: {server_url}")
- logger.debug(f"心跳数据: {data}")
- response = requests.post(f"{server_url}/api/clients", headers=headers, data=data)
-
- if response.status_code == 201:
- data = response.json()
- logger.debug(f"心跳发送成功。服务器响应: {data}")
- return True
- else:
- logger.debug(f"心跳发送失败。状态码: {response.status_code}, 响应内容: {response.text}")
+ if "deploy_time" not in local_storage:
+ logger.error("本地存储中缺少部署时间,无法请求UUID")
return False
- except requests.RequestException as e:
- # 如果请求异常,可能是网络问题,不记录错误
- logger.debug(f"发送心跳时出错: {e}")
- return False
+ try_count: int = 0
+ while True:
+ # 如果不存在,则向服务端请求一个新的UUID(注册客户端)
+ logger.info("正在向遥测服务端请求UUID...")
+ try:
+ response = requests.post(
+ f"{TELEMETRY_SERVER_URL}/stat/reg_client",
+ json={"deploy_time": local_storage["deploy_time"]},
+ )
-class HeartbeatThread(threading.Thread):
- """心跳线程类"""
+ if response.status_code == 200:
+ data = response.json()
+ client_id = data.get("mmc_uuid")
+ if client_id:
+ # 将UUID存储到本地
+ local_storage["mmc_uuid"] = client_id
+ self.client_uuid = client_id
+ logger.info(f"成功获取UUID: {self.client_uuid}")
+ return True # 成功获取UUID,返回True
+ else:
+ logger.error("无效的服务端响应")
+ else:
+ logger.error(f"请求UUID失败,状态码: {response.status_code}, 响应内容: {response.text}")
+ except requests.RequestException as e:
+ logger.error(f"请求UUID时出错: {e}") # 可能是网络问题
- def __init__(self, server_url, interval):
- super().__init__(daemon=True) # 设置为守护线程,主程序结束时自动结束
- self.server_url = server_url
- self.interval = interval
- self.client_id = get_unique_id()
- self.running = True
- self.stop_event = threading.Event() # 添加事件对象用于可中断的等待
- self.last_heartbeat_time = 0 # 记录上次发送心跳的时间
-
- def run(self):
- """线程运行函数"""
- logger.debug(f"心跳线程已启动,客户端ID: {self.client_id}")
-
- while self.running:
- # 发送心跳
- if send_heartbeat(self.server_url, self.client_id):
- logger.info(f"{self.interval}秒后发送下一次心跳...")
+ # 请求失败,重试次数+1
+ try_count += 1
+ if try_count > 3:
+ # 如果超过3次仍然失败,则退出
+ logger.error("获取UUID失败,请检查网络连接或服务端状态")
+ return False
else:
- logger.info(f"{self.interval}秒后重试...")
+ # 如果可以重试,等待后继续(指数退避)
+ await asyncio.sleep(4**try_count)
- self.last_heartbeat_time = time.time()
+ async def _send_heartbeat(self):
+ """向服务器发送心跳"""
+ try:
+ headers = {
+ "Client-UUID": self.client_uuid,
+ "User-Agent": f"HeartbeatClient/{self.client_uuid[:8]}",
+ }
- # 使用可中断的等待代替 sleep
- # 每秒检查一次是否应该停止或发送心跳
- remaining_wait = self.interval
- while remaining_wait > 0 and self.running:
- # 每次最多等待1秒,便于及时响应停止请求
- wait_time = min(1, remaining_wait)
- if self.stop_event.wait(wait_time):
- break # 如果事件被设置,立即退出等待
- remaining_wait -= wait_time
+ logger.debug(f"正在发送心跳到服务器: {self.server_url}")
- # 检查是否由于外部原因导致间隔异常延长
- if time.time() - self.last_heartbeat_time >= self.interval * 1.5:
- logger.warning("检测到心跳间隔异常延长,立即发送心跳")
- break
+ response = requests.post(
+ f"{self.server_url}/stat/client_heartbeat",
+ headers=headers,
+ json=self.info_dict,
+ )
- def stop(self):
- """停止线程"""
- self.running = False
- self.stop_event.set() # 设置事件,中断等待
- logger.debug("心跳线程已收到停止信号")
+ # 处理响应
+ if 200 <= response.status_code < 300:
+ # 成功
+ logger.debug(f"心跳发送成功,状态码: {response.status_code}")
+ elif response.status_code == 403:
+ # 403 Forbidden
+ logger.error(
+ "心跳发送失败,403 Forbidden: 可能是UUID无效或未注册。"
+ "处理措施:重置UUID,下次发送心跳时将尝试重新注册。"
+ )
+ self.client_uuid = None
+ del local_storage["mmc_uuid"] # 删除本地存储的UUID
+ else:
+ # 其他错误
+ logger.error(f"心跳发送失败,状态码: {response.status_code}, 响应内容: {response.text}")
+ except requests.RequestException as e:
+ logger.error(f"心跳发送失败: {e}")
-def main():
- if global_config.remote_enable:
- """主函数,启动心跳线程"""
- # 配置
- server_url = "http://hyybuth.xyz:10058"
- # server_url = "http://localhost:10058"
- heartbeat_interval = 300 # 5分钟(秒)
+ async def run(self):
+ # 发送心跳
+ if global_config.remote_enable:
+ if self.client_uuid is None:
+ if not await self._req_uuid():
+ logger.error("获取UUID失败,跳过此次心跳")
+ return
- # 创建并启动心跳线程
- heartbeat_thread = HeartbeatThread(server_url, heartbeat_interval)
- heartbeat_thread.start()
-
- return heartbeat_thread # 返回线程对象,便于外部控制
- return None
-
-
-# --- 测试用例 ---
-if __name__ == "__main__":
- print("测试唯一ID生成:")
- print("唯一ID:", get_unique_id())
+ await self._send_heartbeat()
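
The new `_req_uuid` registers the client with up to three attempts and a `4**n` exponential backoff between them before giving up. A self-contained sketch of that retry pattern, with a placeholder endpoint and payload rather than the real service (the task itself uses `TELEMETRY_SERVER_URL` and `local_storage["deploy_time"]`):

import asyncio
from typing import Optional

import requests


async def post_with_backoff(url: str, payload: dict, max_tries: int = 3) -> Optional[dict]:
    """Retry a registration-style POST with 4**n backoff, mirroring _req_uuid."""
    for attempt in range(1, max_tries + 1):
        try:
            resp = requests.post(url, json=payload, timeout=10)
            if resp.status_code == 200:
                return resp.json()
        except requests.RequestException:
            pass  # network error: fall through to the retry wait
        if attempt < max_tries:
            await asyncio.sleep(4 ** attempt)  # 4s, then 16s with the defaults
    return None


# Hypothetical usage; the URL below is a placeholder, not the real service:
# asyncio.run(post_with_backoff("http://localhost:8080/stat/reg_client",
#                               {"deploy_time": 0.0}))

As in the patch, the blocking `requests.post` call runs directly inside the coroutine; a production version might offload it with `asyncio.to_thread` to avoid stalling the event loop during retries.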