Compare commits


No commits in common. "develop" and "v4.2.1" have entirely different histories.

292 changed files with 23259 additions and 31237 deletions

View File

@@ -8,8 +8,10 @@
!/glances/outputs/static
# Include Requirements files
!/all-requirements.txt
!/requirements.txt
!/docker-requirements.txt
!/webui-requirements.txt
!/optional-requirements.txt
# Include Config file
!/docker-compose/glances.conf
@@ -17,6 +19,3 @@
# Include Binary file
!/docker-bin.sh
# Include TOML file
!/pyproject.toml

View File

@@ -1,82 +1,170 @@
# This pipeline aims at building Glances Pypi packages
# This pipeline aims at building Glances for the following targets:
# - Pypi
# - Docker Hub
name: build
env:
DEFAULT_DOCKER_IMAGE: nicolargo/glances
NODE_ENV: ${{ (contains('refs/heads/master', github.ref) || startsWith(github.ref, 'refs/tags/v')) && 'prod' || 'dev' }}
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || 'refs/heads/master' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
# Alpine image platform: https://hub.docker.com/_/alpine
# linux/arm/v6,linux/arm/v7 do not work (timeout during the build)
DOCKER_PLATFORMS: linux/amd64,linux/arm64/v8
# Ubuntu image platforms list: https://hub.docker.com/_/ubuntu
# linux/arm/v7 do not work (Cargo/Rust not available)
DOCKER_PLATFORMS_UBUNTU: linux/amd64,linux/arm64/v8
on:
workflow_call:
secrets:
TEST_PYPI_API_TOKEN:
description: 'Test PyPI API token'
required: true
PYPI_API_TOKEN:
description: 'PyPI API token'
required: true
DOCKER_USERNAME:
description: 'Docker Hub username'
required: true
DOCKER_TOKEN:
description: 'Docker Hub token'
required: true
jobs:
build:
name: Build distribution 📦
if: github.event_name == 'push'
pypi:
runs-on: ubuntu-latest
permissions:
attestations: write
id-token: write
steps:
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.13"
- name: Install pypa/build
- uses: actions/checkout@v4
- name: Upgrade pip
run: >-
python3 -m
python -m
pip install
--upgrade
pip
- name: Install build tools
run: >-
python -m
pip install
build
--user
- name: Build a binary wheel and a source tarball
run: python3 -m build
- name: Store the distribution packages
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
run: >-
python -m
build
--sdist
--wheel
--outdir dist/
pypi:
name: Publish Python 🐍 distribution 📦 to PyPI
if: startsWith(github.ref, 'refs/tags')
needs:
- build
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/glances
permissions:
attestations: write
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
attestations: false
print-hash: true
pypi_test:
name: Publish Python 🐍 distribution 📦 to TestPyPI
if: github.ref == 'refs/heads/develop'
needs:
- build
runs-on: ubuntu-latest
environment:
name: testpypi
url: https://pypi.org/p/glances
permissions:
attestations: write
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to TestPyPI
- name: Publish distribution package to Test PyPI
if: github.ref == 'refs/heads/develop'
uses: pypa/gh-action-pypi-publish@release/v1
with:
repository-url: https://test.pypi.org/legacy/
skip-existing: true
attestations: false
- name: Publish distribution package to PyPI
if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
attestations: false
create_Docker_builds:
runs-on: ubuntu-latest
# Make sure we release the python package first. So we are sure to get the latest.
needs:
- pypi
outputs:
tags: ${{ steps.config.outputs.tags }}
steps:
- name: Determine image tags
id: config
shell: bash
run: |
TAG_ARRAY='['
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/v}
TAG_ARRAY="$TAG_ARRAY { \"target\": \"minimal\", \"tag\": \"${VERSION}\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" },"
elif [[ $GITHUB_REF == refs/heads/develop ]]; then
TAG_ARRAY="$TAG_ARRAY { \"target\": \"dev\", \"tag\": \"dev\" },"
else
TAG_ARRAY="$TAG_ARRAY { \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" },"
fi
TAG_ARRAY="${TAG_ARRAY::-1} ]"
echo "Tags to build: $TAG_ARRAY"
echo "tags=$TAG_ARRAY" >> $GITHUB_OUTPUT
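# Illustration only (not part of the workflow): following the string
# concatenation above, a hypothetical tag push refs/tags/v4.2.1 writes
#   tags=[ { "target": "minimal", "tag": "4.2.1" }, { "target": "full", "tag": "4.2.1-full" } ]
# to $GITHUB_OUTPUT, while a push to refs/heads/develop writes
#   tags=[ { "target": "dev", "tag": "dev" } ]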
build_Docker_image:
runs-on: ubuntu-latest
needs:
- create_Docker_builds
strategy:
fail-fast: false
matrix:
os: ['alpine', 'ubuntu']
tag: ${{ fromJson(needs.create_Docker_builds.outputs.tags) }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Retrieve Repository Docker metadata
id: docker_meta
uses: docker/metadata-action@v5
with:
images: ${{ env.DEFAULT_DOCKER_IMAGE }}
labels: |
org.opencontainers.image.url=https://nicolargo.github.io/glances/
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
if: ${{ env.PUSH_BRANCH == 'true' }}
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and push image
uses: docker/build-push-action@v5
with:
push: ${{ env.PUSH_BRANCH == 'true' }}
tags: "${{ env.DEFAULT_DOCKER_IMAGE }}:${{ matrix.os != 'alpine' && format('{0}-', matrix.os) || '' }}${{ matrix.tag.tag }}"
build-args: |
CHANGING_ARG=${{ github.sha }}
context: .
file: "docker-files/${{ matrix.os }}.Dockerfile"
platforms: ${{ matrix.os != 'ubuntu' && env.DOCKER_PLATFORMS || env.DOCKER_PLATFORMS_UBUNTU }}
target: ${{ matrix.tag.target }}
labels: ${{ steps.docker_meta.outputs.labels }}
# GHA default behaviour overwrites last build cache. Causes alpine and ubuntu cache to overwrite each other.
# Use `scope` with the os name to prevent that
cache-from: 'type=gha,scope=${{ matrix.os }}'
cache-to: 'type=gha,mode=max,scope=${{ matrix.os }}'
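# Illustration only: with the tags template above, a hypothetical matrix entry
# { os: alpine, tag: "4.2.1" } pushes nicolargo/glances:4.2.1, while
# { os: ubuntu, tag: "4.2.1-full" } pushes nicolargo/glances:ubuntu-4.2.1-full
# (only non-alpine flavours get the "<os>-" prefix).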

View File

@@ -1,105 +0,0 @@
# This pipeline aims at building Glances Docker images
name: build_docker
env:
DEFAULT_DOCKER_IMAGE: nicolargo/glances
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
# Alpine image platform: https://hub.docker.com/_/alpine
# linux/arm/v6,linux/arm/v7 do not work (timeout during the build)
DOCKER_PLATFORMS: linux/amd64,linux/arm64/v8
# Ubuntu image platforms list: https://hub.docker.com/_/ubuntu
# linux/arm/v7 do not work (Cargo/Rust not available)
DOCKER_PLATFORMS_UBUNTU: linux/amd64,linux/arm64/v8
on:
workflow_call:
secrets:
DOCKER_USERNAME:
description: 'Docker Hub username'
required: true
DOCKER_TOKEN:
description: 'Docker Hub token'
required: true
jobs:
create_docker_images_list:
runs-on: ubuntu-latest
outputs:
tags: ${{ steps.config.outputs.tags }}
steps:
- name: Determine image tags
id: config
shell: bash
run: |
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/v}
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"${VERSION}\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
elif [[ $GITHUB_REF == refs/heads/develop ]]; then
TAG_ARRAY="[{ \"target\": \"dev\", \"tag\": \"dev\" }]"
else
TAG_ARRAY="[]"
fi
echo "Tags to build: $TAG_ARRAY"
echo "tags=$TAG_ARRAY" >> $GITHUB_OUTPUT
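# Illustration only: here a hypothetical refs/tags/v4.2.1 push yields
#   [{ "target": "minimal", "tag": "4.2.1" }, { "target": "minimal", "tag": "latest" },
#    { "target": "full", "tag": "4.2.1-full" }, { "target": "full", "tag": "latest-full" }]
# and any other ref yields [], in which case the fromJson matrix below
# expands to nothing and no image is built.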
build_docker_images:
runs-on: ubuntu-latest
needs:
- create_docker_images_list
strategy:
fail-fast: false
matrix:
os: ['alpine', 'ubuntu']
tag: ${{ fromJson(needs.create_docker_images_list.outputs.tags) }}
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Retrieve Repository Docker metadata
id: docker_meta
uses: docker/metadata-action@v5
with:
images: ${{ env.DEFAULT_DOCKER_IMAGE }}
labels: |
org.opencontainers.image.url=https://nicolargo.github.io/glances/
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
with:
version: latest
- name: Login to DockerHub
uses: docker/login-action@v3
if: ${{ env.PUSH_BRANCH == 'true' }}
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_TOKEN }}
- name: Build and push image
uses: docker/build-push-action@v6
with:
push: ${{ env.PUSH_BRANCH == 'true' }}
tags: "${{ env.DEFAULT_DOCKER_IMAGE }}:${{ matrix.os != 'alpine' && format('{0}-', matrix.os) || '' }}${{ matrix.tag.tag }}"
build-args: |
CHANGING_ARG=${{ github.sha }}
context: .
file: "docker-files/${{ matrix.os }}.Dockerfile"
platforms: ${{ matrix.os != 'ubuntu' && env.DOCKER_PLATFORMS || env.DOCKER_PLATFORMS_UBUNTU }}
target: ${{ matrix.tag.target }}
labels: ${{ steps.docker_meta.outputs.labels }}
# GHA default behaviour overwrites last build cache. Causes alpine and ubuntu cache to overwrite each other.
# Use `scope` with the os name to prevent that
cache-from: 'type=gha,scope=${{ matrix.os }}'
cache-to: 'type=gha,mode=max,scope=${{ matrix.os }}'

View File

@@ -17,11 +17,9 @@ jobs:
build:
if: github.event_name != 'pull_request'
uses: ./.github/workflows/build.yml
needs: [quality, test]
build_docker:
if: github.event_name != 'pull_request'
uses: ./.github/workflows/build_docker.yml
secrets:
TEST_PYPI_API_TOKEN: ${{ secrets.TEST_PYPI_API_TOKEN }}
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_TOKEN: ${{ secrets.DOCKER_TOKEN }}
needs: [quality, test]

View File

@@ -11,7 +11,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Run Trivy vulnerability scanner in repo mode
uses: aquasecurity/trivy-action@master

View File

@@ -10,7 +10,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v10
- uses: actions/stale@v5
with:
days-before-issue-stale: 90
days-before-issue-close: -1

View File

@@ -22,7 +22,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@@ -8,10 +8,10 @@ on:
jobs:
source-code-checks:
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Check formatting with Ruff
uses: chartboost/ruff-action@v1
@@ -23,101 +23,103 @@ jobs:
with:
args: 'check'
# - name: Static type check
# run: |
# echo "Skipping static type check for the moment, too much error...";
# # pip install pyright
# # pyright glances
- name: Static type check
run: |
echo "Skipping static type check for the moment, too much error...";
# pip install pyright
# pyright glances
test-linux:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: ubuntu-24.04
runs-on: ubuntu-22.04
strategy:
matrix:
# Python EOL versions are not tested
# Multiple Python version only tested for Linux
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install dependencies
run: |
if [ -f dev-requirements.txt ]; then python -m pip install -r dev-requirements.txt; fi
if [ -f requirements.txt ]; then python -m pip install -r requirements.txt; fi
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Unitary tests
run: |
python -m pytest ./tests/test_core.py
python ./unittest-core.py
test-windows:
# Error appear with h11, not related to Glances
# Should be tested if correction is done
# Installed c:\hostedtoolcache\windows\python\3.9.13\x64\lib\site-packages\exceptiongroup-1.2.1-py3.9.egg
# error: h11 0.14.0 is installed but h11<0.13,>=0.11 is required by {'httpcore'}
# Error: Process completed with exit code 1.
# test-windows:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: windows-2025
strategy:
matrix:
# Windows-curses not available for Python 3.14 for the moment
# See https://github.com/zephyrproject-rtos/windows-curses/issues/76
python-version: ["3.13"]
steps:
# # https://github.com/actions/runner-images?tab=readme-ov-file#available-images
# runs-on: windows-2022
# strategy:
# matrix:
# # Python version "3.12" introduce this issue:
# # https://github.com/nicolargo/glances/actions/runs/6439648370/job/17487567454
# python-version: ["3.8", "3.9", "3.10", "3.11"]
# steps:
- uses: actions/checkout@v5
# - uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
# - name: Set up Python ${{ matrix.python-version }}
# uses: actions/setup-python@v5
# with:
# python-version: ${{ matrix.python-version }}
# cache: 'pip'
- name: Install dependencies
run: |
if (Test-Path -PathType Leaf "dev-requirements.txt") { python -m pip install -r dev-requirements.txt }
if (Test-Path -PathType Leaf "requirements.txt") { python -m pip install -r requirements.txt }
pip install .
# - name: Install dependencies
# run: |
# python -m pip install --upgrade pip
# if (Test-Path -PathType Leaf "requirements.txt") { python -m pip install -r requirements.txt }
# python setup.py install
- name: Unitary tests
run: |
python -m pytest ./tests/test_core.py
# - name: Unitary tests
# run: |
# python ./unittest-core.py
test-macos:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: macos-15
runs-on: macos-14
strategy:
matrix:
# Only test the latest stable version
python-version: ["3.14"]
python-version: ["3.13"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
- name: Install dependencies
run: |
if [ -f dev-requirements.txt ]; then python -m pip install -r dev-requirements.txt; fi
if [ -f requirements.txt ]; then python -m pip install -r requirements.txt; fi
python -m pip install --upgrade pip
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
- name: Unitary tests
run: |
python -m pytest ./tests/test_core.py
python ./unittest-core.py
# Error when trying to implement #2749
# pkg: No packages available to install matching 'py-pip' have been found in the repositories
@@ -134,10 +136,8 @@ jobs:
# with:
# usesh: true
# prepare: |
# pkg install -y python3
# pkg install -y python3 py-pip
# run: |
# set -e -x
# python3 -m pip install pytest
# python3 -m pip install --user -r dev-requirements.txt
# python3 -m pip install --user -r requirements.txt
# python3 -m pytest ./tests/test_core.py
# python ./unittest-core.py

View File

@@ -14,9 +14,9 @@ jobs:
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Glances will be build with Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v5
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'

.gitignore
View File

@@ -23,7 +23,6 @@ local.properties
.classpath
.settings/
.loadpath
.ipynb_checkpoints/
# External tool builders
.externalToolBuilders/
@@ -64,14 +63,5 @@ bower_components/
/*_source.tar.bz2
# Virtual env
.venv-uv/
.venv/
uv.lock
.python-version
/venv*/
# Test
.coverage
tests-data/issues/*/config/
# Local SSL certificates
glances.local*.pem

View File

@@ -1,107 +1,22 @@
repos:
- repo: https://github.com/gitleaks/gitleaks
rev: v8.24.2
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: gitleaks
name: "🔒 security · Detect hardcoded secrets"
- id: check-ast
- id: check-docstring-first
- id: check-json
- id: check-merge-conflict
- id: check-shebang-scripts-are-executable
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: detect-private-key
- id: mixed-line-ending
- id: requirements-txt-fixer
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.10
rev: v0.4.4
hooks:
- id: ruff-format
name: "🐍 python · Formatter with Ruff"
types_or: [ python, pyi ]
args: [ --config, './pyproject.toml' ]
- id: ruff-check
name: "🐍 python · Linter with Ruff"
types_or: [ python, pyi ]
args: [ --fix, --exit-non-zero-on-fix, --config, './pyproject.toml' ]
# - repo: https://github.com/RobertCraigie/pyright-python
# rev: v1.1.391
# hooks:
# - id: pyright
# name: "🐍 python · Check types"
# - repo: https://github.com/biomejs/pre-commit
# rev: "v2.3.7"
# hooks:
# - id: biome-check
# name: "🟨 javascript · Lint, format, and safe fixes with Biome"
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.35.0
hooks:
- id: check-github-workflows
name: "🐙 github-actions · Validate gh workflow files"
args: ["--verbose"]
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.11.0.1
hooks:
- id: shellcheck
name: "🐚 shell · Lint shell scripts"
- repo: https://github.com/openstack/bashate
rev: 2.1.1
hooks:
- id: bashate
name: "🐚 shell · Check shell script code style"
entry: bashate --error . --ignore=E006
# - repo: https://github.com/mrtazz/checkmake.git
# rev: 0.2.2
# hooks:
# - id: checkmake
# name: "🐮 Makefile · Lint Makefile"
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-executables-have-shebangs
name: "📁 filesystem/⚙️ exec · Verify shebang presence"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Verify script permissions"
- id: check-case-conflict
name: "📁 filesystem/📝 names · Check case sensitivity"
- id: destroyed-symlinks
name: "📁 filesystem/🔗 symlink · Detect broken symlinks"
- id: check-merge-conflict
name: "🌳 git · Detect conflict markers"
- id: forbid-new-submodules
name: "🌳 git · Prevent submodule creation"
- id: no-commit-to-branch
name: "🌳 git · Protect main branches"
args: ["--branch", "main", "--branch", "master"]
- id: check-added-large-files
name: "🌳 git · Block large file commits"
args: ['--maxkb=5000']
- id: check-ast
name: "🐍 python/🔍 quality · Validate Python AST"
- id: check-docstring-first
name: "🐍 python/📝 style · Enforce docstring at top"
- id: check-json
name: "📄 formats/json · Validate JSON files"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Ensure scripts are executable"
- id: check-toml
name: "📄 formats/toml · Validate TOML files"
- id: check-yaml
name: "📄 formats/yaml · Validate YAML syntax"
- id: debug-statements
name: "🐍 python/🪲 debug · Detect debug statements"
- id: detect-private-key
name: "🔐 security · Detect private keys"
- id: mixed-line-ending
name: "📄 text/↩️ newline · Normalize line endings"
- id: requirements-txt-fixer
name: "🐍 python/📦 deps · Sort requirements.txt"
- repo: local
hooks:
- id: find-duplicate-lines
name: "❗local script · Find duplicate lines at the end of file"
entry: bash tests-data/tools/find-duplicate-lines.sh
language: system
types: [python]
pass_filenames: false
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
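# Usage sketch (standard pre-commit CLI, not specific to this repository):
#   pre-commit install          # register the hooks as a git pre-commit hook
#   pre-commit run --all-files  # run every configured hook against the whole tree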

View File

@@ -31,4 +31,4 @@ sphinx:
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: dev-requirements.txt
- requirements: doc-requirements.txt

View File

@@ -13,9 +13,6 @@ PGP Public key: gpg --keyserver pgp.mit.edu --recv-keys 0xba43c11f2c8b4347
RazCrimson (maintainer of the Glances project)
https://github.com/RazCrimson
Ariel Otibili (aka) ariel-anieli (for the huge work on code quality)
https://github.com/ariel-anieli
Alessio Sergi (aka) Al3hex (thank you for the great job on this project)
https://twitter.com/al3hex
https://github.com/asergi

View File

@@ -3,12 +3,9 @@ include CONTRIBUTING.md
include COPYING
include NEWS.rst
include README.rst
include README-pypi.rst
include SECURITY.md
include conf/glances.conf
include conf/fetch-templates/*.jinja
include requirements.txt
include all-requirements.txt
recursive-include docs *
recursive-include glances *.py
recursive-include glances/outputs/static *

Makefile
View File

@@ -1,8 +1,19 @@
PORT ?= 8008
CONF := conf/glances.conf
LASTTAG = $(shell git describe --tags --abbrev=0)
PORT ?= 8008
venv_full:= venv/bin
venv_dev := venv-dev/bin
venv_min := venv-min/bin
CONF := conf/glances.conf
PIP := $(venv_full)/pip
PYTHON := $(venv_full)/python
LASTTAG = $(shell git describe --tags --abbrev=0)
IMAGES_TYPES := full minimal
VENV_TYPES := full min dev
VENV_PYTHON := $(VENV_TYPES:%=venv-%-python)
VENV_UPG := $(VENV_TYPES:%=venv-%-upgrade)
VENV_DEPS := $(VENV_TYPES:%=venv-%)
VENV_INST_UPG := $(VENV_DEPS) $(VENV_UPG)
IMAGES_TYPES := full minimal dev
DISTROS := alpine ubuntu
alpine_images := $(IMAGES_TYPES:%=docker-alpine-%)
ubuntu_images := $(IMAGES_TYPES:%=docker-ubuntu-%)
@@ -15,155 +26,105 @@ PODMAN_SOCK ?= /run/user/$(shell id -u)/podman/podman.sock
DOCKER_SOCK ?= /var/run/docker.sock
DOCKER_SOCKS := -v $(PODMAN_SOCK):$(PODMAN_SOCK):ro -v $(DOCKER_SOCK):$(DOCKER_SOCK):ro
DOCKER_OPTS := --rm -e TZ="${TZ}" -e GLANCES_OPT="" --pid host --network host
UV_RUN := .venv-uv/bin/uv
# if the command is only `make`, the default task will be to print the help.
.DEFAULT_GOAL := help
.PHONY: help test docs docs-server venv requirements profiling docker all clean all test
.PHONY: help test docs docs-server venv venv-min venv-dev
help: ## List all make commands available
@grep -E '^[\.a-zA-Z_%-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk -F ":" '{print $1}' | \
grep -v % | sed 's/\\//g' | sort | \
grep -v % | \
sed 's/\\//g' | \
sort | \
awk 'BEGIN {FS = ":[^:]*?##"}; {printf "\033[1;34mmake %-50s\033[0m %s\n", $$1, $$2}'
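# Illustration only: the grep/awk pipeline above renders each "target: ## doc"
# pair as a colored line, e.g. (using two targets defined later in this file):
#   make docs    Create the documentation
#   make run     Start Glances in console mode (also called standalone)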
# ===================================================================
# Virtualenv
# ===================================================================
# install-uv: ## Instructions to install the UV tool
# @echo "Install the UV tool (https://astral.sh/uv/)"
# @echo "Please install the UV tool manually"
# @echo "For example with: curl -LsSf https://astral.sh/uv/install.sh | sh"
# @echo "Or via a package manager of your distribution"
# @echo "For example for Snap: snap install astral-uv"
venv-%-upgrade: UPGRADE = --upgrade
install-uv: ## Install UV tool in a specific virtualenv
python3 -m venv .venv-uv
.venv-uv/bin/pip install uv
define DEFINE_VARS_FOR_TYPE
venv-$(TYPE) venv-$(TYPE)-upgrade: VIRTUAL_ENV = $(venv_$(TYPE))
endef
upgrade-uv: ## Upgrade the UV tool
.venv-uv/bin/pip install --upgrade pip
.venv-uv/bin/pip install --upgrade uv
$(foreach TYPE,$(VENV_TYPES),$(eval $(DEFINE_VARS_FOR_TYPE)))
venv: ## Create the virtualenv with all dependencies
$(UV_RUN) sync --all-extras --no-group dev
$(VENV_PYTHON): venv-%-python:
virtualenv -p /usr/bin/python3 $(if $(filter full,$*),venv,venv-$*)
venv-upgrade venv-switch-to-full: ## Upgrade the virtualenv with all dependencies
$(UV_RUN) sync --upgrade --all-extras
$(VENV_INST_UPG): venv-%:
$(if $(UPGRADE),$(VIRTUAL_ENV)/pip install --upgrade pip,)
$(foreach REQ,$(REQS), $(VIRTUAL_ENV)/pip install $(UPGRADE) -r $(REQ);)
$(if $(PRE_COMMIT),$(VIRTUAL_ENV)/pre-commit install --hook-type pre-commit,)
venv-min: ## Create the virtualenv with minimal dependencies
$(UV_RUN) sync
venv-python: $(VENV_PYTHON) ## Install all Python 3 venv
venv: $(VENV_DEPS) ## Install all Python 3 dependencies
venv-upgrade: $(VENV_UPG) ## Upgrade all Python 3 dependencies
venv-upgrade-min venv-switch-to-min: ## Upgrade the virtualenv with minimal dependencies
$(UV_RUN) sync --upgrade
# For full installation (with optional dependencies)
venv-clean: ## Remove the virtualenv
rm -rf .venv
venv-full venv-full-upgrade: REQS = requirements.txt optional-requirements.txt
venv-dev: ## Create the virtualenv with dev dependencies
$(UV_RUN) sync --dev --all-extras
$(UV_RUN) run pre-commit install --hook-type pre-commit
venv-full-python: ## Install Python 3 venv
venv-full: venv-python ## Install Python 3 run-time
venv-full-upgrade: ## Upgrade Python 3 run-time dependencies
# ===================================================================
# Requirements
#
# Note: the --no-hashes option should be used because pip (in CI) has
# issues with hashes.
# ===================================================================
# For minimal installation (without optional dependencies)
requirements-min: ## Generate the requirements.txt files (minimal dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --output-file requirements.txt
venv-min venv-min-upgrade: REQS = requirements.txt
requirements-all: ## Generate the all-requirements.txt files (all dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
venv-min-python: ## Install Python 3 venv minimal
venv-min: venv-min-python ## Install Python 3 minimal run-time dependencies
venv-min-upgrade: ## Upgrade Python 3 minimal run-time dependencies
requirements-docker: ## Generate the docker-requirements.txt files (Docker specific dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
# For development
requirements-dev: ## Generate the dev-requirements.txt files (dev dependencies)
$(UV_RUN) export --no-hashes --only-dev --output-file dev-requirements.txt
venv-dev venv-dev-upgrade: REQS = dev-requirements.txt doc-requirements.txt
venv-dev: PRE_COMMIT = 1
requirements: requirements-min requirements-all requirements-dev requirements-docker ## Generate all the requirements files
requirements-upgrade: venv-upgrade requirements ## Upgrade the virtualenv and regenerate all the requirements files
venv-dev-python: ## Install Python 3 venv
venv-dev: venv-python ## Install Python 3 dev dependencies
venv-dev-upgrade: ## Upgrade Python 3 dev dependencies
# ===================================================================
# Tests
# ===================================================================
test: ## Run All unit tests
$(UV_RUN) run pytest
$(UNIT_TESTS): test-%: unittest-%.py
$(PYTHON) $<
test-core: ## Run Core unit tests
$(UV_RUN) run pytest tests/test_core.py
test-core: ## Run core unit tests
test-restful: ## Run Restful unit tests
test-xmlrpc: ## Run XMLRPC unit tests
test-api: ## Run API unit tests
$(UV_RUN) run pytest tests/test_api.py
test: $(UNIT_TESTS) ## Run unit tests
test-memoryleak: ## Run Memory-leak unit tests
$(UV_RUN) run pytest tests/test_memoryleak.py
test-with-upgrade: venv-upgrade venv-dev-upgrade test ## Upgrade deps and run unit tests
test-perf: ## Run Perf unit tests
$(UV_RUN) run pytest tests/test_perf.py
test-min: ## Run core unit tests in minimal environment
$(venv_min)/python unittest-core.py
test-restful: ## Run Restful API unit tests
$(UV_RUN) run pytest tests/test_restful.py
test-webui: ## Run WebUI unit tests
$(UV_RUN) run pytest tests/test_webui.py
test-xmlrpc: ## Run XMLRPC API unit tests
$(UV_RUN) run pytest tests/test_xmlrpc.py
test-with-upgrade: venv-upgrade test ## Upgrade deps and run unit tests
test-export-csv: ## Run interface tests with CSV
/bin/bash ./tests/test_export_csv.sh
test-export-json: ## Run interface tests with JSON
/bin/bash ./tests/test_export_json.sh
test-export-influxdb-v1: ## Run interface tests with InfluxDB version 1 (Legacy)
/bin/bash ./tests/test_export_influxdb_v1.sh
test-export-influxdb-v3: ## Run interface tests with InfluxDB version 3 (Core)
/bin/bash ./tests/test_export_influxdb_v3.sh
test-export-timescaledb: ## Run interface tests with TimescaleDB
/bin/bash ./tests/test_export_timescaledb.sh
test-export-nats: ## Run interface tests with NATS
/bin/bash ./tests/test_export_nats.sh
test-exports: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb test-export-nats ## Tests all exports
test-min-with-upgrade: venv-min-upgrade ## Upgrade deps and run unit tests in minimal environment
$(venv_min)/python unittest-core.py
# ===================================================================
# Linters, profilers and cyber security
# ===================================================================
pre-commit: ## Run pre-commit hooks
$(UV_RUN) run pre-commit run --all-files
find-duplicate-lines: ## Search for duplicate lines in files
/bin/bash tests-data/tools/find-duplicate-lines.sh
format: ## Format the code
$(UV_RUN) run ruff format .
$(venv_dev)/python -m ruff format .
lint: ## Lint the code.
$(UV_RUN) run ruff check . --fix
lint-readme: ## Lint the main README.rst file
$(UV_RUN) run rstcheck README.rst
$(UV_RUN) run rstcheck README-pypi.rst
$(venv_dev)/python -m ruff check . --fix
codespell: ## Run codespell to fix common misspellings in text files
$(UV_RUN) run codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
$(venv_dev)/codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
semgrep: ## Run semgrep to find bugs and enforce code standards
$(UV_RUN) run semgrep scan --config=auto
$(venv_dev)/semgrep scan --config=auto
profiling-%: SLEEP = 3
profiling-%: TIMES = 30
@@ -177,66 +138,57 @@ endef
profiling-gprof: CPROF = glances.cprof
profiling-gprof: ## Callgraph profiling (need "apt install graphviz")
$(DISPLAY-BANNER)
$(UV_RUN) run python -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(UV_RUN) run gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
$(PYTHON) -m cProfile -o $(CPROF) run.py --stop-after $(TIMES)
$(venv_dev)/gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
rm -f $(CPROF)
profiling-pyinstrument: ## PyInstrument profiling
$(DISPLAY-BANNER)
$(UV_RUN) add pyinstrument
$(UV_RUN) run pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
$(PIP) install pyinstrument
$(PYTHON) -m pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances --stop-after $(TIMES)
profiling-pyspy: ## Flame profiling
profiling-pyspy: ## Flame profiling (currently not compatible with Python 3.12)
$(DISPLAY-BANNER)
$(UV_RUN) run py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- .venv-uv/bin/uvrun python run-venv.py -C $(CONF) --stop-after $(TIMES)
$(venv_dev)/py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- $(PYTHON) run.py --stop-after $(TIMES)
profiling: profiling-gprof profiling-pyinstrument profiling-pyspy ## Profiling of the Glances software
trace-malloc: ## Trace the malloc() calls
@echo "Malloc test is running, please wait ~30 secondes..."
$(UV_RUN) run python -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
$(PYTHON) -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
memory-leak: ## Profile memory leaks
$(UV_RUN) run python -m glances -C $(CONF) --memory-leak
$(PYTHON) -m glances -C $(CONF) --memory-leak
memory-profiling: TIMES = 2400
memory-profiling: PROFILE = mprofile_*.dat
memory-profiling: OUT_DIR = docs/_static
memory-profiling: ## Profile memory usage
@echo "It's a very long test (~4 hours)..."
rm -f $(PROFILE)
@echo "1/2 - Start memory profiling with the history option enable"
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
$(venv_dev)/mprof run -T 1 -C run.py -C $(CONF) --stop-after $(TIMES) --quiet
$(venv_dev)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
rm -f $(PROFILE)
@echo "2/2 - Start memory profiling with the history option disable"
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
$(venv_dev)/mprof run -T 1 -C run.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(venv_dev)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
rm -f $(PROFILE)
# Trivy installation: https://aquasecurity.github.io/trivy/latest/getting-started/installation/
trivy: ## Run Trivy to find vulnerabilities
$(UV_RUN) run trivy fs ./glances/
bandit: ## Run Bandit to find vulnerabilities
$(UV_RUN) run bandit glances -r
trivy: ## Run Trivy to find vulnerabilities in container images
trivy fs .
# ===================================================================
# Docs
# ===================================================================
docs: ## Create the documentation
$(UV_RUN) run python -m glances -C $(CONF) --api-doc > ./docs/api/python.rst
$(UV_RUN) run python ./generate_openapi.py
$(UV_RUN) run python -m glances -C $(CONF) --api-restful-doc > ./docs/api/restful.rst
$(PYTHON) -m glances -C $(CONF) --api-doc > ./docs/api.rst
cd docs && ./build.sh && cd ..
docs-server: docs ## Start a Web server to serve the documentation
(sleep 2 && sensible-browser "http://localhost:$(PORT)") &
cd docs/_build/html/ && .venv-uv/bin/uvrun python -m http.server $(PORT)
docs-jupyter: ## Start Jupyter Notebook
$(UV_RUN) run --with jupyter jupyter lab
cd docs/_build/html/ && ../../../venv/bin/python -m http.server $(PORT)
release-note: ## Generate release note
git --no-pager log $(LASTTAG)..HEAD --first-parent --pretty=format:"* %s"
@@ -253,28 +205,22 @@ install: ## Open a Web Browser to the installation procedure
webui webui%: DIR = glances/outputs/static/
webui-gen-config: ## Generate the Web UI config file
$(UV_RUN) run python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
webui: webui-gen-config ## Build the Web UI
webui: ## Build the Web UI
cd $(DIR) && npm ci && npm run build
webui-audit: ## Audit the Web UI
cd $(DIR) && npm audit
webui-audit-fix: webui-gen-config ## Fix audit the Web UI
webui-audit-fix: ## Fix audit the Web UI
cd $(DIR) && npm audit fix && npm ci && npm run build
webui-update: webui-gen-config ## Update JS dependencies
cd $(DIR) && npm update --save && npm ci && npm run build
# ===================================================================
# Packaging
# ===================================================================
flatpak: venv-upgrade ## Generate FlatPack JSON file
flatpak: venv-dev-upgrade ## Generate FlatPack JSON file
git clone https://github.com/flatpak/flatpak-builder-tools.git
$(UV_RUN) run python ./flatpak-builder-tools/pip/flatpak-pip-generator glances
$(PYTHON) ./flatpak-builder-tools/pip/flatpak-pip-generator glances
rm -rf ./flatpak-builder-tools
@echo "Now follow: https://github.com/flathub/flathub/wiki/App-Submission"
@@ -308,33 +254,30 @@ docker-ubuntu-full: ## Generate local docker image (Ubuntu full)
docker-ubuntu-minimal: ## Generate local docker image (Ubuntu minimal)
docker-ubuntu-dev: ## Generate local docker image (Ubuntu dev)
trivy-docker: ## Run Trivy to find vulnerabilities in Docker images
$(UV_RUN) run trivy image glances:local-alpine-full
$(UV_RUN) run trivy image glances:local-alpine-minimal
$(UV_RUN) run trivy image glances:local-ubuntu-full
$(UV_RUN) run trivy image glances:local-ubuntu-minimal
# ===================================================================
# Run
# ===================================================================
run: ## Start Glances in console mode (also called standalone)
$(UV_RUN) run python -m glances -C $(CONF)
$(PYTHON) -m glances -C $(CONF)
run-debug: ## Start Glances in debug console mode (also called standalone)
$(UV_RUN) run python -m glances -C $(CONF) -d
$(PYTHON) -m glances -C $(CONF) -d
run-local-conf: ## Start Glances in console mode with the system conf file
$(UV_RUN) run python -m glances
$(PYTHON) -m glances
run-local-conf-hide-public: ## Start Glances in console mode with the system conf file and hide public information
$(UV_RUN) run python -m glances --hide-public-info
$(PYTHON) -m glances --hide-public-info
run-like-htop: ## Start Glances with the same features as Htop
$(UV_RUN) run python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
run-min: ## Start minimal Glances in console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF)
run-fetch: ## Start Glances in fetch mode
$(UV_RUN) run python -m glances --fetch
run-min-debug: ## Start minimal Glances in debug console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF) -d
run-min-local-conf: ## Start minimal Glances in console mode with the system conf file
$(venv_min)/python -m glances
$(DOCKER_RUNTIMES): run-docker-%:
$(DOCKER_RUN) $(DOCKER_OPTS) $(DOCKER_SOCKS) -it glances:local-$*
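# Sketch only (assuming DOCKER_RUN resolves to the docker CLI):
# `make run-docker-alpine-full` expands to roughly
#   docker run --rm -e TZ="$TZ" -e GLANCES_OPT="" --pid host --network host \
#     -v $(PODMAN_SOCK):$(PODMAN_SOCK):ro -v $(DOCKER_SOCK):$(DOCKER_SOCK):ro \
#     -it glances:local-alpine-full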
@@ -346,35 +289,29 @@ run-docker-ubuntu-minimal: ## Start Glances Ubuntu Docker minimal in console mod
run-docker-ubuntu-full: ## Start Glances Ubuntu Docker full in console mode
run-docker-ubuntu-dev: ## Start Glances Ubuntu Docker dev in console mode
generate-ssl: ## Generate local and self-signed SSL certificates for dev (needs mkcert)
mkcert glances.local localhost 127.0.0.1 0.0.0.0
run-webserver: ## Start Glances in Web server mode
$(UV_RUN) run python -m glances -C $(CONF) -w
$(PYTHON) -m glances -C $(CONF) -w
run-webserver-local-conf: ## Start Glances in Web server mode with the system conf file
$(UV_RUN) run python -m glances -w
$(PYTHON) -m glances -w
run-webserver-local-conf-hide-public: ## Start Glances in Web server mode with the system conf file and hide public info
$(UV_RUN) run python -m glances -w --hide-public-info
$(PYTHON) -m glances -w --hide-public-info
run-restapiserver: ## Start Glances in REST API server mode
$(UV_RUN) run python -m glances -C $(CONF) -w --disable-webui
$(PYTHON) -m glances -C $(CONF) -w --disable-webui
run-server: ## Start Glances in server mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) -s
$(PYTHON) -m glances -C $(CONF) -s
run-client: ## Start Glances in client mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) -c localhost
$(PYTHON) -m glances -C $(CONF) -c localhost
run-browser: ## Start Glances in browser mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) --browser
run-web-browser: ## Start Web Central Browser
$(UV_RUN) run python -m glances -C $(CONF) -w --browser
$(PYTHON) -m glances -C $(CONF) --browser
run-issue: ## Start Glances in issue mode
$(UV_RUN) run python -m glances -C $(CONF) --issue
$(PYTHON) -m glances -C $(CONF) --issue
run-multipass: ## Install and start Glances in a VM (only available on Ubuntu with multipass already installed)
multipass launch -n glances-on-lts lts
@@ -384,4 +321,4 @@ run-multipass: ## Install and start Glances in a VM (only available on Ubuntu wi
multipass delete glances-on-lts
show-version: ## Show Glances version number
$(UV_RUN) run python -m glances -C $(CONF) -V
$(PYTHON) -m glances -C $(CONF) -V

NEWS.rst
View File

@@ -2,302 +2,6 @@
Glances ChangeLog
==============================================================================
=============
Version 4.4.1
=============
Bug corrected:
* Restful API issue after a while (stats are no more updated) #3333
=============
Version 4.4.0
=============
Breaking changes:
* A new Python API is now available to use Glances as a Python lib in your own development #3237
* In the process list, the long command line is now truncated by default. Use the arrow keys to show the full command line. SHIFT + arrow keys are used to switch between column sorts (TUI).
* Prometheus export format is now more user friendly (see detail in #3283)
Enhancements:
* Make a Glances API in order to use Glances as a Python lib #3237
* Add a new --fetch (neofetch like) option to display a snapshot of the current system status #3281
* Show used port in container section #2054
* Show long command line with arrow key #1553
* Sensors plugin refreshes by default every 10 seconds
* Do not call update if a call is done to a specific plugin through the API #3033
* [UI] Process virtual memory display can be disabled by configuration #3299
* Choose between used or available in the mem plugin #3288
* [Experimental] Add export to DuckDB database #3205
* Add Disk I/O Latency stats #1070
* Filter fields to export #3258
* Remove .keys() from loops over dicts #3253
* Remove iterator helpers #3252
Bug corrected:
* [MACOS] Glances not showing Processes on MacOS #3100
* Last dev build broke Homepage API calls ? only 1 widget still working #3322
* Cloud plugin always generate communication with 169.254.169.254, even if the plugin is disabled #3316
* API response delay (3+ minutes) when VMs are running #3317
* [WINDOWS] Glances do not display CPU stat correctly #3155
* Glances hangs if network device (NFS) is not available #3290
* Fix prometheus export format #3283
* Issue #3279 zfs cache and memory math issues #3289
* [MACOS] Glances crashes when I try to filter #3266
* Glances hangs when killing a process with multiple CTRL-C #3264
* Issues after disabling system and processcount plugins #3248
* Headers missing from predefined fields in TUI browser machine list #3250
* Add another check for the famous Netifaces issue - Related to #3219
* Key error 'type' in server_list_static.py (load_server_list) #3247
Continuous integration and documentation:
* Glances now use uv for the dev environment #3025
* Glances is compatible with Python 3.14 #3319
* Glances provides requirements files with specific versions for each release
* Requirements files are now generated dynamically with the make requirements or requirements-upgrade target
* Add duplicate line check in pre-commit (strange behavior with some VSCode extensions)
* Solve issue with multiprocessing exception with Snap package
* Add a test script to identify CPU consumption of the sensor plugin
* Refactor port to take into account netifaces2
* Correct issue with Chrome driver in WebUI unit test
* Upgrade export test with InfluxDB 1.12
* Fix typo of --export-process-filter help message #3314
* In the outdated feature, catch error message if Pypi server not reachable
* Add unit test for auto_unit
* Label error in docs #3286
* Put WebUI conf generator in a dedicated script
* Refactor the Makefile to generate WebUI config file for all webui targets
* Update sensors documentation #3275
* Update docker compose env quote #3273
* Update docker-compose.yml #3249
* Update API doc generation
* Update README with nice icons #3236
* Add documentation for WebUI test
Thanks to all contributors and bug reporters !
Special thanks to:
- Adi
- Bennett Kanuka
- Tim Potter
- Ariel Otilibili
- Boris Okassa
- Lawrence
- Shohei YOSHIDA
- jmwallach
- korn3r
=============
Version 4.3.3
=============
Bug corrected:
* Something in 4.3.2 broke the home assistant add-on for Glances #3238
Thanks to the FastAPI and Home Assistant community for the support.
=============
Version 4.3.2
=============
Enhancements:
* Add stats about running VMS (qemu/libvirt/kvm support through virsh) #1531
* Add support for InfluxDB 3 Core #3182
* (postgre)SQL export support / TimeScaleDB #2814
* CSV column name now include the plugin name - Related to #2394
* Make all results from amps plugins exportable #2394
* Make --stdout (csv and json) compliant with client/server mode #3235
* API history endpoints shows times without timezone #3218
* FR: Sort Sensors by name in proper number order #3132
* In the FS module, do not display threshold for volume mounted in 'ro' (read-only) #3143
* Add a new field in the process list to identify Zombie processes #3178
* Update plugin containers display and order #3186
* Implement a basic memory cache with TTL for API call (set to ~1 second) #3202
* Add container inactive_file & limit to InfluxDB2 export #3206
Bug corrected:
* [GPU] AMD Plugin: Operation not permitted #3125
* Container memory stats not displayed #3142
* [WEBUI] Irix mode (per core instead of per CPU percentage) not togglable #3158
* iteritems, itervalues, and iterkeys are no longer needed in Python 3 - Related to #3181
* Glances Central Browser should use name instead of IP address for redirection #3103
* Glances breaks if Podman container is started while it is running #3199
Continuous integration and documentation:
* Add a new option --print-completion to generate shell tab completion - #3111
* Improve Restful API documentation embedded in FastAPI #2632
* Upgrade JS libs #3147
* Improve unittest for CSV export #3150
* Improve unittest for InfluxDB plugin #3149
* Code refactoring - Rename plugin class to <Plugin name>Plugin instead of PluginModel #3169
* Refactor code to limit the complexity of update_views method in plugins #3171
Thanks to all contributors and bug reporters !
Special thanks to:
- Ariel Otilibili
- kenrmayfield
=============
Version 4.3.1
=============
Enhancements:
* [WebUI] Top processes extended stats and processes filter in Web server mode #410
* I'd like a feature to make the foreground color for colored background white #3119
* -disable-bg in ~/.config/glances.conf #3113
* Entry point in the API to get extended process stats #3095
* Replace netifaces by netifaces-plus dependencies #3053
* Replace docker by containers in glances-grafana-flux.json #3118
Bug corrected:
* default_config_dir: Fix config path to include glances/ directory #3106
* Cannot set warning/critical temperature for a specific sensor needs test #3102
* Try to reduce latency between stat's update and view - #3086
* Error on Cloud plugin initialisation make TUI crash #3085
Continuous integration:
* Add Selenium to test WebUI #3044
Thanks to all contributors and bug reporters !
Special thanks to:
- Alexander Kuznetsov
- Jonathan Chemla
- mizulike
===============
Version 4.3.0.8
===============
Bug corrected:
* IP plugin broken with Netifaces2 #3076
* WebUI is not responsive on mobile #3059 (second run)
===============
Version 4.3.0.7
===============
Bug corrected:
* WebUI is not responsive on mobile #3059
===============
Version 4.3.0.6
===============
Bug corrected:
* Browser mode not working with the sensors plugin #3069
* netifaces is deprecated, use netifaces-plus or netifaces2 #3055
Continuous integration and documentation:
* Update alpine Docker tag to v3.21 #3061
===============
Version 4.3.0.5
===============
Bug corrected:
* WebUI errors in 4.3.0.4 on iPad Air (and Browser with low resolution) #3057
===============
Version 4.3.0.4
===============
Continuous integration and documentation:
* Pin Python version in Ubuntu image to 3.12
===============
Version 4.3.0.3
===============
Continuous integration and documentation:
* Pin Alpine image to 3.20 (3.21 is not compliant with Netifaces) Related to #3053
===============
Version 4.3.0.2
===============
Enhancements:
* Revert "Replace netifaces by netifaces-plus" #3053 because it breaks the build on the Alpine image
===============
Version 4.3.0.1
===============
Enhancements:
* Replace netifaces by netifaces-plus #3053
Bug corrected:
* CONTAINERS section missing in 4.3.0 WebUI #3052
===============
Version 4.3.0
===============
Enhancements:
* Web Based Glances Central Browser #1121
* Ability to specify hide or show for smart plugin #2996
* Thread mode ('j' hotkey) is not taken into account in the WebUI #3019
* [WEBUI] Clear old alert messages in the WebUI #3042
* Raise an (Alert) Event for a group of sensors #3049
* Allow processlist columns to be selected in config file #1524
* Allow containers columns to be selected in config file #2722
* [WebUI] Unnecessary space between Processcount and processlist #3032
* Add comparable NVML_LIB check for Windows #3000
* Change the default path for graph export to /tmp/glances
* Improve CSS of WebUI #3024
Bug corrected:
* Thresholds not displayed in the WebUI for the DiskIO plugin #1498
* FS module alias configuration not taken into account every time #3010
* Unexpected behaviour while running glances in docker with --export influxdb2 #2904
* Correct issue when key name contains space - Related to #2983
* Issue with ports plugin (for URL request) #3008
* Network problem when no bitrate available #3014
* SyntaxError: f-string: unmatched '[' in server list (on the DEVELOP branch only) #3018
* Uptime for Docker containers not working #3021
* WebUI doesn't display valid time for process list #2902
* Bug In the Web-UI, Timestamps for 'Warning or critical alerts' are showing incorrect month #3023
* Correct display issue on Containers plugin in WebUI #3028
Continuous integration and documentation:
* Bumped minimal Python version to 3.9 #3005
* Make the glances/outputs/static/js/uiconfig.json generated automatically from the make webui task
* Update unit-test for Glances Central Browser
* Add unit-test for new entry point in the API (plugin/item/key)
* Add a target to start Glances with Htop features
* Try new build and publish to Pypi CI actions
Thanks to all contributors and bug reporters !
Special thanks to:
* Ariel Otilibili for code quality improvements #2801
===============
Version 4.2.1
===============
@@ -513,7 +217,7 @@ See release note in Wiki format: https://github.com/nicolargo/glances/wiki/Glanc
**BREAKING CHANGES:**
* The minimal Python version is 3.8
* The Glances API version 3 is replaced by the version 4. So Restful API URL is now /api/4/ #2610
* The Glances API version 3 is replaced by the version 4. So Restfull API URL is now /api/4/ #2610
* Alias definition change in the configuration file #1735
Glances version 3.x and lower:
@@ -538,9 +242,9 @@ Minimal requirements for Glances version 4 are:
* packaging
* ujson
* pydantic
* fastapi (for WebUI / RestFul API)
* uvicorn (for WebUI / RestFul API)
* jinja2 (for WebUI / RestFul API)
* fastapi (for WebUI / RestFull API)
* uvicorn (for WebUI / RestFull API)
* jinja2 (for WebUI / RestFull API)
Majors changes between Glances version 3 and version 4:
@@ -600,7 +304,7 @@ Bug corrected:
CI and documentation:
* New logo for Glances version 4.0 #2713
* Update api-restful.rst documentation #2496
* Update api.rst documentation #2496
* Change Renovate config #2729
* Docker compose password unrecognized arguments when applying docs #2698
* Docker includes OS Release Volume mount info #2473
@@ -978,7 +682,7 @@ Bugs corrected:
* Threading.Event.isSet is deprecated in Python 3.10 #2017
* Fix code scanning alert - Clear-text logging of sensitive information security #2006
* The gpu temperature unit are displayed incorrectly in web ui bug #2002
* Doc for 'alert' Restful/JSON API response documentation #1994
* Doc for 'alert' Restfull/JSON API response documentation #1994
* Show the spinning state of a disk documentation #1993
* Web server status check endpoint enhancement #1988
* --time parameter being ignored for client/server mode bug #1978
@@ -1073,7 +777,7 @@ Bugs corrected:
* [3.2.0/3.2.1] keybinding not working anymore #1904
* InfluxDB/InfluxDB2 Export object has no attribute hostname #1899
Documentation: The "make docs" generate RestFul/API documentation file.
Documentation: The "make docs" generate RestFull/API documentation file.
===============
Version 3.2.1
@@ -2100,7 +1804,7 @@ Version 2.1
* Add Glances log message (in the /tmp/glances.log file)
The default log level is INFO, you can switch to the DEBUG mode using the -d option on the command line.
* Add RESTful API to the Web server mode
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
* Improve SNMP fallback mode for Cisco IOS, VMware ESXi
* Add --theme-white feature to optimize display for white background
* Experimental history feature (--enable-history option on the command line)

View File

@@ -1,385 +0,0 @@
Glances 🌟
==========
**Glances** is an open-source, cross-platform system monitoring tool.
It allows real-time monitoring of various aspects of your system such as
CPU, memory, disk and network usage, as well as running processes,
logged-in users, temperatures, voltages, fan speeds, etc.
It also supports container monitoring, with support for different container
management systems such as Docker and LXC. The information is presented in an
easy-to-read dashboard and can also be used for remote monitoring of systems
via a web interface or command line interface. It is easy to install and use
and can be customized to show only the information that you are interested in.
In client/server mode, remote monitoring can be done via the terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage 👋
========
For the standalone mode, just run:
.. code-block:: console
$ glances
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/glances-responsive-webdesign.png
For the Web server mode, run:
.. code-block:: console
$ glances -w
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
In this mode, an HTTP/RESTful API is exposed; see the `RestfulApi`_ document for more details.
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-web.png
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
.. code-block:: console
$ glances -s
and this one on the client:
.. code-block:: console
$ glances -c <ip>
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in the TUI:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is possible to display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (attributes are not supported in this mode, in order to output a real JSON object):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look at a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu["total"]
7.3
>>> gl.mem["used"]
12498582144
>>> gl.auto_unit(gl.mem["used"])
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network["wlp0s20f3"]
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
For a complete example of how to use Glances as a library, have a look at the `PythonApi`_.
Documentation 📜
================
For complete documentation have a look at the readthedocs_ website.
If you have any questions (after RTFM! and the `FAQ`_), please post them on the official Reddit `forum`_ or in GitHub `Discussions`_.
Gateway to other services 🌐
============================
Glances can export stats to:
- ``CSV`` file
- ``JSON`` file
- ``InfluxDB`` server
- ``Cassandra`` server
- ``CouchDB`` server
- ``OpenTSDB`` server
- ``Prometheus`` server
- ``StatsD`` server
- ``ElasticSearch`` server
- ``PostgreSQL/TimeScale`` server
- ``RabbitMQ/ActiveMQ`` broker
- ``ZeroMQ`` broker
- ``Kafka`` broker
- ``Riemann`` server
- ``Graphite`` server
- ``RESTful`` endpoint
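Each exporter is enabled with the ``--export`` option, plus exporter-specific command line options or a dedicated section in the configuration file. For example, a sketch of a CSV export:
.. code-block:: console
$ glances --export csv --export-csv-file /tmp/glances.csv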
Installation 🚀
===============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
To install Glances, simply use the ``pip`` command line.
Warning: on modern Linux operating systems, you may get an externally-managed-environment
error message when you try to use ``pip``. In this case, go to the PipX section below.
.. code-block:: console
pip install --user glances
*Note*: Python headers are required to install `psutil`_, a Glances
dependency. For example, on Debian/Ubuntu **the simplest** is
``apt install python3-psutil``; alternatively, first install the
*python-dev* package and gcc (*python-devel* on Fedora/CentOS/RHEL).
For Windows, just install psutil from the binary installation file.
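As an alternative to ``pip install --user`` (and another way around the externally-managed-environment error mentioned above), a virtual environment also works; a minimal sketch:
.. code-block:: console
python3 -m venv ~/.venv
source ~/.venv/bin/activate
pip install glances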
By default, Glances is installed **without** the Web interface dependencies.
To install it, use the following command:
.. code-block:: console
pip install --user 'glances[web]'
For a full installation (with all features, see the features list below):
.. code-block:: console
pip install --user 'glances[all]'
Features list:
- all: install dependencies for all features
- action: install dependencies for action feature
- browser: install dependencies for the Glances central browser
- cloud: install dependencies for cloud plugin
- containers: install dependencies for container plugin
- export: install dependencies for all exports modules
- gpu: install dependencies for GPU plugin
- graph: install dependencies for graph export
- ip: install dependencies for IP public option
- raid: install dependencies for RAID plugin
- sensors: install dependencies for sensors plugin
- smart: install dependencies for smart plugin
- snmp: install dependencies for SNMP
- sparklines: install dependencies for sparklines option
- web: install dependencies for Webserver (WebUI) and Web API
- wifi: install dependencies for Wifi plugin
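Extras can be combined using the standard pip extras syntax; for example, to install only the WebUI and container monitoring dependencies:
.. code-block:: console
pip install --user 'glances[web,containers]'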
To upgrade Glances to the latest version:
.. code-block:: console
pip install --user --upgrade glances
The current develop branch is published to the test.pypi.org package index.
If you want to test the develop version (which could be unstable), enter:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ Glances
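Note: TestPyPI does not mirror every dependency, so if the installation fails while resolving requirements, adding the main index as a fallback usually helps:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ Glances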
PyPI: PipX, the alternative way
-------------------------------
Install PipX on your system (``apt install pipx`` on Ubuntu).
Install Glances (with all features):
.. code-block:: console
pipx install 'glances[all]'
The ``glances`` script will be installed in the ``~/.local/bin`` folder.
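To upgrade a PipX-managed installation later:
.. code-block:: console
pipx upgrade glances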
Shell tab completion 🔍
=======================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the ``--print-completion`` option.
For example, on a Linux operating system with bash shell:
.. code-block:: console
$ mkdir -p ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion
$ glances --print-completion bash > ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
$ source ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
The following shells are supported: bash, zsh and tcsh.
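For zsh, the same option generates a completion function; a sketch, assuming ``~/.zfunc`` is part of your ``fpath`` and ``compinit`` is enabled:
.. code-block:: console
$ mkdir -p ~/.zfunc
$ glances --print-completion zsh > ~/.zfunc/_glances
$ exec zsh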
Requirements 🧩
===============
Glances is developed in Python. Python version 3.10 or higher
must be installed on your system.
*Note for Python 2 users*
Glances version 4 or higher does not support Python 2 (nor Python 3 < 3.10).
Please use Glances version 3.4.x if you need Python 2 support.
Dependencies:
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
Extra dependencies:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (for the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``kafka-python`` (for the Kafka export module)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License 📜
==========
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
.. _psutil: https://github.com/giampaolo/psutil
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://www.reddit.com/r/glances/
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions

View File

@ -1,18 +1,10 @@
.. raw:: html
<div align="center">
.. image:: ./docs/_static/glances-responsive-webdesign.png
.. raw:: html
<h1>Glances</h1>
An Eye on your System
===============================
Glances - An eye on your system
===============================
| |pypi| |test| |contributors| |quality|
| |starts| |docker| |pypistat| |sponsors|
| |reddit|
| |starts| |docker| |pypistat|
| |sponsors| |twitter|
.. |pypi| image:: https://img.shields.io/pypi/v/glances.svg
:target: https://pypi.python.org/pypi/Glances
@ -45,20 +37,12 @@ An Eye on your System
:target: https://github.com/sponsors/nicolargo
:alt: Sponsors
.. |twitter| image:: https://img.shields.io/badge/X-000000?style=for-the-badge&logo=x&logoColor=white
.. |twitter| image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
:alt: @nicolargo
.. |reddit| image:: https://img.shields.io/badge/Reddit-FF4500?style=for-the-badge&logo=reddit&logoColor=white
:target: https://www.reddit.com/r/glances/
:alt: @reddit
.. raw:: html
</div>
Summary 🌟
==========
Summary
=======
**Glances** is an open-source, cross-platform system monitoring tool.
It allows real-time monitoring of various aspects of your system such as
@ -70,199 +54,98 @@ and can also be used for remote monitoring of systems via a web interface or com
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-summary.png
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-responsive-webdesign.png
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage 👋
========
Project sponsorship
===================
For the standalone mode, just run:
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
.. code-block:: console
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
$ glances
Any and all contributions are greatly appreciated.
.. image:: ./docs/_static/glances-summary.png
Requirements
============
For the Web server mode, run:
- ``python>=3.8`` (use Glances 3.4.x for lower Python version)
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``orjson`` (an optimized alternative to the standard json module)
.. code-block:: console
*Note for Python 2 users*
$ glances -w
Glances version 4 or higher do not support Python 2 (and Python 3 < 3.8).
Please uses Glances version 3.4.x if you need Python 2 support.
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
Optional dependencies:
In this mode, a HTTP/Restful API is exposed, see document `RestfulApi`_ for more details.
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (For the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``jinja2`` (for templating, used under the hood by FastAPI)
- ``kafka-python`` (for the Kafka export module)
- ``netifaces`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
.. image:: ./docs/_static/screenshot-web.png
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
.. code-block:: console
$ glances -s
and this one on the client:
.. code-block:: console
$ glances -c <ip>
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is also possible to display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in CSV format thanks to the ``--stdout-csv`` option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in JSON format thanks to the ``--stdout-json`` option (attributes are not supported in this mode, so that the output is a real JSON object):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look at a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: ./docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu.get("total")
7.3
>>> gl.mem.get("used")
12498582144
>>> gl.auto_unit(gl.mem.get("used"))
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network.get("wlp0s20f3")
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
For a complete example of how to use Glances as a library, have a look at the `PythonApi`_.
Documentation 📜
================
For complete documentation have a look at the readthedocs_ website.
If you have any questions (after RTFM! and the `FAQ`_), please post them on the official Reddit `forum`_ or in GitHub `Discussions`_.
Gateway to other services 🌐
============================
Glances can export stats to:
- files: ``CSV`` and ``JSON``
- databases: ``InfluxDB``, ``ElasticSearch``, ``PostgreSQL/TimeScale``, ``Cassandra``, ``CouchDB``, ``OpenTSDB``, ``Prometheus``, ``StatsD``, ``Riemann`` and ``Graphite``
- brokers: ``RabbitMQ/ActiveMQ``, ``NATS``, ``ZeroMQ`` and ``Kafka``
- others: ``RESTful`` endpoint
Installation 🚀
===============
Installation
============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
Glances is on ``PyPI``. By using PyPI, you will be using the latest
stable version.
To install Glances, simply use the ``pip`` command line in a virtual environment.
To install Glances, simply use the ``pip`` command line.
Warning: on modern Linux operating systems, you may get an externally-managed-environment
error message when you try to use ``pip``. In this case, go to the PipX section below.
.. code-block:: console
cd ~
python3 -m venv ~/.venv
source ~/.venv/bin/activate
pip install glances
pip install --user glances
*Note*: Python headers are required to install `psutil`_, a Glances
dependency. For example, on Debian/Ubuntu **the simplest** is
@ -270,55 +153,38 @@ dependency. For example, on Debian/Ubuntu **the simplest** is
the *python-dev* package and gcc (*python-devel* on Fedora/CentOS/RHEL).
For Windows, just install psutil from the binary installation file.
By default, Glances is installed **without** the Web interface dependencies.
By default, Glances is installed without the Web interface dependencies.
To install it, use the following command:
.. code-block:: console
pip install 'glances[web]'
pip install --user 'glances[web]'
For a full installation (with all features, see the features list below):
For a full installation (with all features):
.. code-block:: console
pip install 'glances[all]'
Features list:
- all: install dependencies for all features
- action: install dependencies for action feature
- browser: install dependencies for the Glances central browser
- cloud: install dependencies for cloud plugin
- containers: install dependencies for container plugin
- export: install dependencies for all exports modules
- gpu: install dependencies for GPU plugin
- graph: install dependencies for graph export
- ip: install dependencies for IP public option
- raid: install dependencies for RAID plugin
- sensors: install dependencies for sensors plugin
- smart: install dependencies for smart plugin
- snmp: install dependencies for SNMP
- sparklines: install dependencies for sparklines option
- web: install dependencies for Webserver (WebUI) and Web API
- wifi: install dependencies for Wifi plugin
pip install --user 'glances[all]'
To upgrade Glances to the latest version:
.. code-block:: console
pip install --upgrade glances
pip install --user --upgrade glances
The current develop branch is published to the test.pypi.org package index.
If you want to test the develop version (which could be unstable), enter:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ Glances
PyPI: PipX, the alternative way
-------------------------------
Install PipX on your system. For example on Ubuntu/Debian:
Install PipX on your system (apt install pipx on Ubuntu).
.. code-block:: console
sudo apt install pipx
Then install Glances (with all features):
Install Glances (with all features):
.. code-block:: console
@ -326,19 +192,19 @@ Then install Glances (with all features):
The glances script will be installed in the ~/.local/bin folder.
To upgrade Glances to the latest version:
.. code-block:: console
pipx upgrade glances
Docker: the cloudy way
----------------------
Glances Docker images are available. You can use it to monitor your
Glances Docker images are availables. You can use it to monitor your
server and all your containers !
The following tags are available:
Get the Glances container:
.. code-block:: console
docker pull nicolargo/glances:latest-full
The following tags are availables:
- *latest-full* for a full Alpine Glances image (latest release) with all dependencies
- *latest* for a basic Alpine Glances (latest release) version with minimal dependencies (FastAPI and Docker)
@ -382,32 +248,13 @@ Run the container in *Web server mode*:
For a full list of options, see the Glances `Docker`_ documentation page.
It is also possible to use a simple Docker compose file (see in ./docker-compose/docker-compose.yml):
.. code-block:: console
cd ./docker-compose
docker-compose up
It will start a Glances server with WebUI.
Brew: The missing package manager
---------------------------------
For Linux and Mac OS, it is also possible to install Glances with `Brew`_:
.. code-block:: console
brew install glances
GNU/Linux package
-----------------
`Glances` is available on many Linux distributions, so you should be
able to install it using your favorite package manager. Nevertheless,
I do not recommend it. Be aware that when you use this method the operating
system `package`_ for `Glances` may not be the latest version and only basic
plugins are enabled.
able to install it using your favorite package manager. Be aware that
when you use this method the operating system `package`_ for `Glances`
may not be the latest version and only basics plugins are enabled.
Note: The Debian package (and all other Debian-based distributions) no
longer includes the JS static files used by the Web interface
@ -418,30 +265,13 @@ higher, the path to the statics file is configurable (see ``issue2612``).
FreeBSD
-------
On FreeBSD, package name depends on the Python version.
Check for Python version:
To install the binary package:
.. code-block:: console
# python --version
# pkg install py39-glances
Install the Glances package:
.. code-block:: console
# pkg install pyXY-glances
Where X and Y are the Major and Minor Values of your Python System.
.. code-block:: console
# Example for Python 3.11.3: pkg install py311-glances
**NOTE:** Check Glances Binary Package Version for your System Architecture.
You must have the Correct Python Version Installed which corresponds to the Glances Binary Package.
To install Glances from Ports:
To install Glances from ports:
.. code-block:: console
@ -451,7 +281,9 @@ To install Glances from Ports:
macOS
-----
MacOS users can install Glances using ``Homebrew`` or ``MacPorts``.
If you do not want to use the glancesautoinstall script, follow this procedure.
macOS users can install Glances using ``Homebrew`` or ``MacPorts``.
Homebrew
````````
@ -471,7 +303,11 @@ Windows
-------
Install `Python`_ for Windows (Python 3.4+ ship with pip) and
follow the Glances Pip install procedure.
then run the following command:
.. code-block:: console
$ pip install glances
Android
-------
@ -525,77 +361,94 @@ Ansible
A Glances ``Ansible`` role is available: https://galaxy.ansible.com/zaxos/glances-ansible-role/
Shell tab completion 🔍
=======================
Usage
=====
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
For example, on a Linux operating system with bash shell:
For the standalone mode, just run:
.. code-block:: console
$ mkdir -p ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion
$ glances --print-completion bash > ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
$ source ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
$ glances
Following shells are supported: bash, zsh and tcsh.
For the Web server mode, run:
Requirements 🧩
===============
.. code-block:: console
Glances is developed in Python. Python version 3.10 or higher
must be installed on your system.
$ glances -w
*Note for Python 2 users*
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
Glances version 4 or higher does not support Python 2 (nor Python 3 < 3.10).
Please use Glances version 3.4.x if you need Python 2 support.
For the client/server mode, run:
Dependencies:
.. code-block:: console
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
$ glances -s
Extra dependencies:
on the server side and run:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (For the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``kafka-python`` (for the Kafka export module)
- ``nats-py`` (for the NATS export module)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
.. code-block:: console
How to contribute ? 🤝
======================
$ glances -c <ip>
on the client one.
You can also detect and display all Glances servers available on your
network or defined in the configuration file:
.. code-block:: console
$ glances --browser
You can also display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (attribute not supported in this mode in order to have a real JSON object in output):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
and RTFM, always.
Documentation
=============
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM!), please post it on the official Q&A `forum`_.
Gateway to other services
=========================
Glances can export stats to: ``CSV`` file, ``JSON`` file, ``InfluxDB``, ``Cassandra``, ``CouchDB``,
``OpenTSDB``, ``Prometheus``, ``StatsD``, ``ElasticSearch``, ``RabbitMQ/ActiveMQ``,
``ZeroMQ``, ``Kafka``, ``Riemann``, ``Graphite`` and ``RESTful`` server.
How to contribute ?
===================
If you want to contribute to the Glances project, read this `wiki`_ page.
@ -604,53 +457,30 @@ There is also a chat dedicated to the Glances developers:
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/nicolargo/glances?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Author
======
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License 📜
==========
License
=======
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
More stars ! 🌟
===============
Please give us a star on `GitHub`_ if you like this project.
.. image:: https://api.star-history.com/svg?repos=nicolargo/glances&type=Date
:target: https://www.star-history.com/#nicolargo/glances&Date
:alt: Star history
.. _psutil: https://github.com/giampaolo/psutil
.. _Brew: https://formulae.brew.sh/formula/glances
.. _glancesautoinstall: https://github.com/nicolargo/glancesautoinstall
.. _Python: https://www.python.org/getit/
.. _Termux: https://play.google.com/store/apps/details?id=com.termux
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://www.reddit.com/r/glances/
.. _forum: https://groups.google.com/forum/?hl=en#!forum/glances-users
.. _wiki: https://github.com/nicolargo/glances/wiki/How-to-contribute-to-Glances-%3F
.. _package: https://repology.org/project/glances/versions
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _Docker: https://github.com/nicolargo/glances/blob/master/docs/docker.rst
.. _GitHub: https://github.com/nicolargo/glances
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions
.. _issue2021: https://github.com/nicolargo/glances/issues/2021
.. _issue2021comment: https://github.com/nicolargo/glances/issues/2021#issuecomment-1197831157
.. _issue2612: https://github.com/nicolargo/glances/issues/2612
.. _Docker: https://github.com/nicolargo/glances/blob/develop/docs/docker.rst

View File

@ -1,237 +0,0 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
annotated-doc==0.0.4
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via
# elasticsearch
# starlette
batinfo==0.4.2 ; sys_platform == 'linux'
# via glances
bernhard==0.2.6
# via glances
cassandra-driver==3.29.3
# via glances
certifi==2025.11.12
# via
# elastic-transport
# influxdb-client
# influxdb3-python
# requests
cffi==2.0.0 ; implementation_name == 'pypy' or platform_python_implementation != 'PyPy'
# via
# cryptography
# pyzmq
chardet==5.2.0
# via pysmart
charset-normalizer==3.4.4
# via requests
chevron==0.14.0
# via glances
click==8.1.8
# via
# geomet
# uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
cryptography==46.0.3
# via pysnmpcrypto
defusedxml==0.7.1
# via glances
dnspython==2.8.0
# via pymongo
docker==7.1.0
# via glances
elastic-transport==9.2.1
# via elasticsearch
elasticsearch==9.2.1
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.128.0
# via glances
geomet==1.1.0
# via cassandra-driver
graphitesender==0.11.2
# via glances
h11==0.16.0
# via uvicorn
humanfriendly==10.0
# via pysmart
ibm-cloud-sdk-core==3.24.2
# via ibmcloudant
ibmcloudant==0.11.2
# via glances
idna==3.11
# via
# anyio
# requests
ifaddr==0.2.0
# via zeroconf
importlib-metadata==8.7.1
# via pygal
influxdb==5.3.2
# via glances
influxdb-client==1.49.0
# via glances
influxdb3-python==0.16.0
# via glances
jinja2==3.1.6
# via
# glances
# pysmi-lextudio
kafka-python==2.3.0
# via glances
markupsafe==3.0.3
# via jinja2
msgpack==1.1.2
# via influxdb
nats-py==2.12.0
# via glances
nvidia-ml-py==13.590.44
# via glances
packaging==25.0
# via glances
paho-mqtt==2.1.0
# via glances
pbkdf2==1.3
# via wifi
pika==1.3.2
# via glances
ply==3.11
# via pysmi-lextudio
podman==5.6.0
# via glances
potsdb==1.0.3
# via glances
prometheus-client==0.23.1
# via glances
protobuf==6.33.2
# via bernhard
psutil==7.2.1
# via glances
psycopg==3.3.2
# via glances
psycopg-binary==3.3.2 ; implementation_name != 'pypy'
# via psycopg
pyarrow==22.0.0
# via influxdb3-python
pyasn1==0.6.1
# via pysnmp-lextudio
pycparser==2.23 ; (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy') or (implementation_name == 'pypy' and platform_python_implementation == 'PyPy')
# via cffi
pydantic==2.12.5
# via fastapi
pydantic-core==2.41.5
# via pydantic
pygal==3.1.0
# via glances
pyjwt==2.10.1
# via
# ibm-cloud-sdk-core
# ibmcloudant
pymdstat==0.4.3
# via glances
pymongo==4.15.5
# via glances
pyreadline3==3.5.4 ; sys_platform == 'win32'
# via humanfriendly
pysmart==1.4.2
# via glances
pysmi-lextudio==1.4.3
# via pysnmp-lextudio
pysnmp-lextudio==6.1.2
# via glances
pysnmpcrypto==0.0.4
# via pysnmp-lextudio
python-dateutil==2.9.0.post0
# via
# elasticsearch
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# influxdb-client
# influxdb3-python
pytz==2025.2
# via influxdb
pywin32==311 ; sys_platform == 'win32'
# via docker
pyzmq==27.1.0
# via glances
reactivex==4.1.0
# via
# influxdb-client
# influxdb3-python
requests==2.32.5
# via
# docker
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# podman
# pysmi-lextudio
setuptools==80.9.0
# via
# influxdb-client
# wifi
shtab==1.8.0 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# influxdb
# python-dateutil
sniffio==1.3.1
# via
# elastic-transport
# elasticsearch
sparklines==0.7.0
# via glances
starlette==0.50.0
# via fastapi
statsd==4.0.1
# via glances
termcolor==3.3.0
# via sparklines
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# cryptography
# elasticsearch
# fastapi
# psycopg
# pydantic
# pydantic-core
# reactivex
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
tzdata==2025.3 ; sys_platform == 'win32'
# via psycopg
urllib3==2.6.2
# via
# docker
# elastic-transport
# ibm-cloud-sdk-core
# influxdb-client
# influxdb3-python
# podman
# requests
uvicorn==0.40.0
# via glances
wifi==0.3.8
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
zeroconf==0.148.0
# via glances
zipp==3.23.0
# via importlib-metadata

View File

@ -1,9 +0,0 @@
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}

View File

@ -1,23 +0,0 @@
  _____ _
 / ____| |
| |  __| | __ _ _ __   ___ ___  ___
| | |_ | |/ _` | '_ \ / __/ _ \/ __|
| |__| | | (_| | | | | (_|  __/\__ \
 \_____|_|\__,_|_| |_|\___\___||___/
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -30,15 +30,9 @@ history_size=1200
# Set the Curses and WebUI interface left menu plugin list (comma-separated)
#left_menu=network,wifi,connections,ports,diskio,fs,irq,folders,raid,smart,sensors,now
# Limit the number of processes to display (in the WebUI)
#max_processes_display=25
#
# Specifics options for TUI
#--------------------------
# Disable background color
#disable_bg=True
#
# Specifics options for WebUI
#----------------------------
max_processes_display=25
# Options for WebUI
#------------------
# Set URL prefix for the WebUI and the API
# Example: url_prefix=/glances/ => http://localhost/glances/
# Note: The final / is mandatory
@ -49,7 +43,7 @@ history_size=1200
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -64,10 +58,6 @@ history_size=1200
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile_password=kfp
#ssl_keyfile=./glances.local+3-key.pem
#ssl_certfile=./glances.local+3.pem
##############################################################################
# Plugins
@ -131,7 +121,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -185,14 +175,12 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}"" >> /tmp/memory.alert
[memswap]
disable=False
@ -201,7 +189,6 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}"" > /tmp/memory.alert
[load]
disable=False
@ -248,9 +235,8 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp0s20f3:WIFI
#alias=wlp2s0:WIFI
[ip]
# Disable display of private IP address
@ -308,32 +294,15 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=False
#dm-0_rx_log=True
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=False
#dm-0_tx_log=True
[fs]
disable=False
@ -343,19 +312,15 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
#alias=/:Root,/zfspool:ZFS
#alias=/:Root
[irq]
# Documentation: https://glances.readthedocs.io/en/latest/aoa/irq.html
@ -398,12 +363,6 @@ disable=True
# Documentation: https://glances.readthedocs.io/en/latest/aoa/smart.html
# This plugin is disabled by default
disable=True
# Define the list of sensors to hide (comma-separated regexp)
#hide=.*Hide_this_driver.*
# Define the list of sensors to show (comma-separated regexp)
#show=.*Drive_Temperature.*
# List of attributes to hide (comma separated)
#hide_attributes=Self-tests,Errors
[hddtemp]
disable=False
@ -415,34 +374,26 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
#show=CPU.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
# Overwrite thresholds for a specific type of sensor
# Default values are grabbed from the system
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
# Temperatures threshold in °C for hddtemp
# Default values if not defined: 45/52/60
#temperature_hdd_careful=45
#temperature_hdd_warning=52
#temperature_hdd_critical=60
temperature_hdd_careful=45
temperature_hdd_warning=52
temperature_hdd_critical=60
# Battery threshold in %
# Default values if not defined: 70/80/90
#battery_careful=70
#battery_warning=80
#battery_critical=90
battery_careful=80
battery_warning=90
battery_critical=95
# Fan speed threshold in RPM
#fan_speed_careful=100
# Sensors alias
@ -459,12 +410,6 @@ disable=False
# Should be one of the following:
# cpu_percent, memory_percent, io_counters, name, cpu_times, username
#sort_key=memory_percent
# List of stats to disable (not grabbed and not displayed)
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disable: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -481,20 +426,13 @@ mem_critical=90
nice_warning=-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
#
# Nice: Example 2, low priority processes escalate from careful to critical
#nice_ok=O
#nice_careful=1,2,3,4,5,6,7,8,9
#nice_warning=10,11,12,13,14
#nice_critical=15,16,17,18,19
#
# Status: define threshold regarding the process status (first letter of process status)
# R: Running, S: Sleeping, Z: Zombie (complete list here https://psutil.readthedocs.io/en/latest/#process-status-constants)
status_ok=R,W,P,I
status_critical=Z,D
# Define the list of processes to export using:
# a comma-separated list of Glances filter
#export=.*firefox.*,pid:1234
# Define a list of process to focus on (comma-separated list of Glances filter)
#focus=.*firefox.*,.*python.*
[ports]
disable=False
@ -543,11 +481,10 @@ port_default_gateway=True
#web_4_description=Intranet
[vms]
disable=True
disable=False
# Define the maximum VMs size name (default is 20 chars)
max_name_size=20
# By default, Glances only displays running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# By default, Glances only display running VMs with states: 'Running', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regarding their states
all=False
@ -561,11 +498,8 @@ disable=False
; hide=telegraf
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
# Thresholds for CPU and MEM (in %)
; cpu_warning=70
; cpu_critical=90
; mem_careful=20
@ -608,21 +542,15 @@ disable=False
# You can also add stats with key, like sensors:value:Ambient (key is case sensitive)
#columns=system:hr_name,load:min5,cpu:total,mem:percent,memswap:percent,sensors:value:Ambient,sensors:value:Composite
# Define the static servers list
# _protocol can be: rpc (default if not defined) or rest
# List is limited to 256 servers max (1 to 256)
#server_1_name=localhost
#server_1_alias=Local WebUI
#server_1_port=61266
#server_1_protocol=rest
#server_1_alias=My local PC
#server_1_port=61209
#server_2_name=localhost
#server_2_alias=My local PC
#server_2_port=61209
#server_2_protocol=rpc
#server_2_port=61235
#server_3_name=192.168.0.17
#server_3_alias=Another PC on my network
#server_3_port=61209
#server_1_protocol=rpc
#server_4_name=notagooddefinition
#server_4_name=pasbon
#server_4_port=61237
[passwords]
@ -641,16 +569,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created
# Can be overwritten by the --graph-path command line option
path=/tmp/glances
path=/tmp
# It is possible to generate the graphs automatically by setting the
# generate_every to a non zero value corresponding to the seconds between
# two generation. Set it to 0 to disable graph auto generation.
@ -664,7 +587,7 @@ style=DarkStyle
[influxdb]
# !!!
# Will be DEPRECATED in future release.
# Please have a look on the new influxdb3 export module
# Please have a look on the new influxdb2 export module (compatible with InfluxDB 1.8.x and 2.x)
# !!!
# Configuration for the --export influxdb option
# https://influxdb.com/
@ -693,28 +616,7 @@ port=8086
protocol=http
org=nicolargo
bucket=glances
token=PUT_YOUR_INFLUXDB2_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
# Prefix will be added for all measurement name
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as a tag
#tags=foo:bar,spam:eggs,domain:`domainname`
[influxdb3]
# Configuration for the --export influxdb3 option
# https://influxdb.com/
host=http://localhost:8181
org=nicolargo
database=glances
token=PUT_YOUR_INFLUXDB3_TOKEN_HERE
token=EjFUTWe8U-MIseEAkaVIgVnej_TrnbdvEcRkaB1imstW7gapSqy6_6-8XD-yd51V0zUUpDy-kAdVD1purDLuxA==
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
@ -880,26 +782,6 @@ prefix=glances
# By default, system_name = FQDN
#system_name=mycomputer
[timescaledb]
# Configuration for the --export timescaledb option
# https://www.timescale.com/
host=localhost
port=5432
db=glances
user=postgres
password=password
# Overwrite device name (default is the FQDN)
# Most of the time, you should not overwrite this value
#hostname=mycomputer
[nats]
# Configuration for the --export nats option
# https://nats.io/
# Host is a separated list of NATS nodes
host=nats://localhost:4222
# Prefix for the subjects (default is 'glances')
prefix=glances
##############################################################################
# AMPS
# * enable: Enable (true) or disable (false) the AMP

View File

@ -1,395 +1,14 @@
# This file was autogenerated by uv via the following command:
# uv export --no-hashes --only-dev --output-file dev-requirements.txt
alabaster==1.0.0
# via sphinx
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via
# httpx
# mcp
# sse-starlette
# starlette
attrs==25.4.0
# via
# glom
# jsonschema
# outcome
# referencing
# reuse
# semgrep
# trio
babel==2.17.0
# via sphinx
boltons==21.0.0
# via
# face
# glom
# semgrep
boolean-py==5.0
# via license-expression
bracex==2.6
# via wcmatch
certifi==2025.11.12
# via
# httpcore
# httpx
# requests
# selenium
cffi==2.0.0 ; (implementation_name != 'pypy' and os_name == 'nt') or platform_python_implementation != 'PyPy'
# via
# cryptography
# trio
cfgv==3.5.0
# via pre-commit
charset-normalizer==3.4.4
# via
# python-debian
# requests
click==8.1.8
# via
# click-option-group
# reuse
# semgrep
# typer
# uvicorn
click-option-group==0.5.9
# via semgrep
codespell==2.4.1
colorama==0.4.6
# via
# click
# pytest
# semgrep
# sphinx
contourpy==1.3.2 ; python_full_version < '3.11'
# via matplotlib
contourpy==1.3.3 ; python_full_version >= '3.11'
# via matplotlib
cryptography==46.0.3
# via pyjwt
cycler==0.12.1
# via matplotlib
distlib==0.4.0
# via virtualenv
docutils==0.21.2
# via
# rstcheck-core
# sphinx
# sphinx-rtd-theme
exceptiongroup==1.2.2
# via
# anyio
# pytest
# semgrep
# trio
# trio-websocket
face==24.0.0
# via glom
filelock==3.20.2
# via virtualenv
fonttools==4.61.1
# via matplotlib
glom==22.1.0
# via semgrep
googleapis-common-protos==1.72.0
# via opentelemetry-exporter-otlp-proto-http
gprof2dot==2025.4.14
h11==0.16.0
# via
# httpcore
# uvicorn
# wsproto
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via mcp
httpx-sse==0.4.3
# via mcp
identify==2.6.15
# via pre-commit
idna==3.11
# via
# anyio
# httpx
# requests
# trio
imagesize==1.4.1
# via sphinx
importlib-metadata==8.7.1
# via opentelemetry-api
iniconfig==2.3.0
# via pytest
jinja2==3.1.6
# via
# reuse
# sphinx
jsonschema==4.25.1
# via
# mcp
# semgrep
jsonschema-specifications==2025.9.1
# via jsonschema
kiwisolver==1.4.9
# via matplotlib
license-expression==30.4.4
# via reuse
markdown-it-py==4.0.0
# via rich
markupsafe==3.0.3
# via jinja2
matplotlib==3.10.8
mcp==1.23.3
# via semgrep
mdurl==0.1.2
# via markdown-it-py
memory-profiler==0.61.0
nodeenv==1.10.0
# via
# pre-commit
# pyright
numpy==2.2.6 ; python_full_version < '3.11'
# via
# contourpy
# matplotlib
numpy==2.4.0 ; python_full_version >= '3.11'
# via
# contourpy
# matplotlib
opentelemetry-api==1.37.0
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-exporter-otlp-proto-common==1.37.0
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.37.0
# via semgrep
opentelemetry-instrumentation==0.58b0
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation-requests==0.58b0
# via semgrep
opentelemetry-proto==1.37.0
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.37.0
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-semantic-conventions==0.58b0
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-util-http==0.58b0
# via opentelemetry-instrumentation-requests
outcome==1.3.0.post0
# via
# trio
# trio-websocket
packaging==25.0
# via
# matplotlib
# opentelemetry-instrumentation
# pytest
# requirements-parser
# semgrep
# sphinx
# webdriver-manager
peewee==3.18.3
# via semgrep
pillow==12.1.0
# via matplotlib
platformdirs==4.5.1
# via virtualenv
pluggy==1.6.0
# via pytest
pre-commit==4.5.1
protobuf==6.33.2
# via
# googleapis-common-protos
# opentelemetry-proto
psutil==7.2.1
# via memory-profiler
py-spy==0.4.1
pycparser==2.23 ; (implementation_name != 'PyPy' and implementation_name != 'pypy' and os_name == 'nt') or (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy')
# via cffi
pydantic==2.12.5
# via
# mcp
# pydantic-settings
# rstcheck-core
pydantic-core==2.41.5
# via pydantic
pydantic-settings==2.12.0
# via mcp
pygments==2.19.2
# via
# pytest
# rich
# sphinx
pyinstrument==5.1.1
pyjwt==2.10.1
# via mcp
pyparsing==3.3.1
# via matplotlib
pyright==1.1.407
pysocks==1.7.1
# via urllib3
pytest==9.0.2
python-dateutil==2.9.0.post0
# via matplotlib
python-debian==1.0.1
# via reuse
python-dotenv==1.2.1
# via
# pydantic-settings
# webdriver-manager
python-magic==0.4.27
# via reuse
python-multipart==0.0.21
# via mcp
pywin32==311 ; sys_platform == 'win32'
# via
# mcp
# semgrep
pyyaml==6.0.3
# via pre-commit
referencing==0.37.0
# via
# jsonschema
# jsonschema-specifications
requests==2.32.5
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
# sphinx
# webdriver-manager
requirements-parser==0.13.0
reuse==6.2.0
rich==13.5.3
# via
# semgrep
# typer
roman-numerals==4.1.0 ; python_full_version >= '3.11'
# via roman-numerals-py
roman-numerals-py==4.1.0 ; python_full_version >= '3.11'
# via sphinx
rpds-py==0.30.0
# via
# jsonschema
# referencing
rstcheck==6.2.5
rstcheck-core==1.2.2
# via rstcheck
ruamel-yaml==0.19.1
# via semgrep
ruamel-yaml-clib==0.2.14
# via semgrep
ruff==0.14.10
selenium==4.39.0
semgrep==1.146.0
setuptools==80.9.0
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via trio
snowballstemmer==3.0.1
# via sphinx
sortedcontainers==2.4.0
# via trio
sphinx==8.1.3 ; python_full_version < '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.2.3 ; python_full_version >= '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sse-starlette==3.1.2
# via mcp
starlette==0.50.0
# via
# mcp
# sse-starlette
tomli==2.0.2
# via
# pytest
# semgrep
# sphinx
tomlkit==0.13.3
# via reuse
trio==0.32.0
# via
# selenium
# trio-websocket
trio-websocket==0.12.2
# via selenium
typer==0.21.0
# via rstcheck
typing-extensions==4.15.0
# via
# anyio
# cryptography
# mcp
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pyright
# referencing
# selenium
# semgrep
# starlette
# typer
# typing-inspection
# uvicorn
# virtualenv
typing-inspection==0.4.2
# via
# mcp
# pydantic
# pydantic-settings
urllib3==2.6.2
# via
# requests
# selenium
# semgrep
uvicorn==0.40.0 ; sys_platform != 'emscripten'
# via mcp
virtualenv==20.35.4
# via pre-commit
wcmatch==8.5.2
# via semgrep
webdriver-manager==4.0.2
websocket-client==1.9.0
# via selenium
wrapt==1.17.3
# via opentelemetry-instrumentation
wsproto==1.3.2
# via trio-websocket
zipp==3.23.0
# via importlib-metadata
codespell
fonttools>=4.43.0 # not directly required, pinned by Snyk to avoid a vulnerability
gprof2dot
matplotlib
memory-profiler
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
pre-commit
py-spy
pyright
requirements-parser
ruff
semgrep
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability

doc-requirements.txt Normal file
View File

@ -0,0 +1,7 @@
psutil
defusedxml
orjson
reuse
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
sphinx
sphinx_rtd_theme

View File

@ -0,0 +1,3 @@
FROM glances:local-alpine-minimal as glancesminimal
COPY glances.conf /glances/conf/glances.conf
CMD python -m glances -C /glances/conf/glances.conf $GLANCES_OPT

View File

@ -0,0 +1,40 @@
version: "3.9"
services:
reverse-proxy:
image: traefik
command: --api --docker
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
whoami:
image: emilevauge/whoami
labels:
- "traefik.frontend.rule=Host:whoami.docker.localhost"
monitoring:
image: nicolargo/glances:dev
restart: unless-stopped
pid: host
privileged: true
network_mode: "host"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
environment:
- TZ=${TZ}
- "GLANCES_OPT=-C /glances/conf/glances.conf -w"
# Uncomment for GPU compatibility (Nvidia) inside the container
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
labels:
- "traefik.port=61208"
- "traefik.frontend.rule=Host:glances.docker.localhost"

View File

@ -1,55 +1,25 @@
version: '3.9'
services:
glances:
# See all images tags here: https://hub.docker.com/r/nicolargo/glances/tags
image: nicolargo/glances:latest-full
build:
context: ./
dockerfile: Dockerfile
restart: always
pid: "host"
privileged: true
network_mode: "host"
read_only: true
privileged: false
# Uncomment next line for SATA or NVME smartctl monitoring
# cap_add:
# Uncomment next line for SATA smartctl monitoring
# - SYS_RAWIO
# Uncomment next line for NVME smartctl monitoring
# - SYS_ADMIN
# devices:
# - "/dev/nvme0"
volumes:
- "/:/rootfs:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
# Uncomment for proper distro information in upper panel.
# # Works only for distros that do have this file (most of distros do).
# - "/etc/os-release:/etc/os-release:ro"
tmpfs:
- /tmp
environment:
# Please set to your local timezone (or use local ${TZ} environment variable if set on your host)
- TZ=Europe/Paris
- GLANCES_OPT=-C /glances/conf/glances.conf -w --enable-plugin smart
- PYTHONPYCACHEPREFIX=/tmp/py_caches
# # Uncomment for GPU compatibility (Nvidia) inside the container
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
# Uncomment to protect Glances WebUI by a login/password (add --password to GLANCES_OPT)
# secrets:
# - source: glances_password
# target: /root/.config/glances/<login>.pwd
# secrets:
# glances_password:
# file: ./secrets/glances_password
- TZ=${TZ}
- "GLANCES_OPT=-C /glances/conf/glances.conf -w"
# Uncomment for GPU compatibility (Nvidia) inside the container
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]

docker-compose/glances.conf Normal file → Executable file
View File

@ -13,7 +13,7 @@ check_update=False
# Default is 1200 values (~1h with the default refresh rate)
history_size=1200
# Set the way Glances should display the date (default is %Y-%m-%d %H:%M:%S %Z)
#strftime_format=%Y-%m-%d %H:%M:%S %Z
# strftime_format=%Y-%m-%d %H:%M:%S %Z
# Define external directory for loading additional plugins
# The layout follows the glances standard for plugin definitions
#plugin_dir=/home/user/dev/plugins
@ -31,14 +31,8 @@ history_size=1200
#left_menu=network,wifi,connections,ports,diskio,fs,irq,folders,raid,smart,sensors,now
# Limit the number of processes to display (in the WebUI)
max_processes_display=25
#
# Specifics options for TUI
#--------------------------
# Disable background color
#disable_bg=True
#
# Specifics options for WebUI
#----------------------------
# Options for WebUI
#------------------
# Set URL prefix for the WebUI and the API
# Example: url_prefix=/glances/ => http://localhost/glances/
# Note: The final / is mandatory
@ -49,7 +43,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -64,10 +58,6 @@ max_processes_display=25
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile_password=kfp
#ssl_keyfile=./glances.local+3-key.pem
#ssl_certfile=./glances.local+3.pem
##############################################################################
# Plugins
@ -131,7 +121,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -185,14 +175,12 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}" >> /tmp/memory.alert
[memswap]
disable=False
@ -201,7 +189,6 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}" > /tmp/memory.alert
[load]
disable=False
@ -235,9 +222,8 @@ hide_no_up=True
hide_no_ip=True
# Set hide_zero to True to automatically hide interfaces with no traffic
hide_zero=False
# Set hide_threshold_bytes to an integer value to automatically hide
# interfaces with traffic less than or equal to this value
#hide_threshold_bytes=0
# Set hide_threshold_bytes to an integer value to automatically hide interfaces with traffic less than this value
hide_threshold_bytes=0
# It is possible to overwrite the bitrate thresholds per interface
# WLAN 0 Default limits (in bits per second aka bps) for interface bitrate
#wlan0_rx_careful=4000000
@ -248,9 +234,8 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp0s20f3:WIFI
#alias=wlp2s0:WIFI
[ip]
# Disable display of private IP address
@ -301,39 +286,12 @@ disable=False
hide=loop.*,/dev/loop.*
# Set hide_zero to True to automatically hide disks with no read/write
hide_zero=False
# Set hide_threshold_bytes to an integer value to automatically hide
# disks with a read/write rate less than or equal to this value
#hide_threshold_bytes=0
# Set hide_threshold_bytes to an integer value to automatically hide disks with read/write less than this value
hide_threshold_bytes=0
# Define the list of disks to be shown (comma-separated)
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=False
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=False
[fs]
disable=False
@ -343,19 +301,15 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
#alias=/:Root,/zsfpool:ZSF
#alias=/:Root
[irq]
# Documentation: https://glances.readthedocs.io/en/latest/aoa/irq.html
@ -398,12 +352,6 @@ disable=True
# Documentation: https://glances.readthedocs.io/en/latest/aoa/smart.html
# This plugin is disabled by default
disable=True
# Define the list of sensors to hide (comma-separated regexp)
#hide=.*Hide_this_driver.*
# Define the list of sensors to show (comma-separated regexp)
#show=.*Drive_Temperature.*
# List of attributes to hide (comma separated)
#hide_attributes=Self-tests,Errors
[hddtemp]
disable=False
@ -415,34 +363,26 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
#show=CPU.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
# Overwrite thresholds for a specific type of sensor
# Default values are grabbed from the system
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
# Temperatures threshold in °C for hddtemp
# Default values if not defined: 45/52/60
#temperature_hdd_careful=45
#temperature_hdd_warning=52
#temperature_hdd_critical=60
temperature_hdd_careful=45
temperature_hdd_warning=52
temperature_hdd_critical=60
# Battery threshold in %
# Default values if not defined: 70/80/90
#battery_careful=70
#battery_warning=80
#battery_critical=90
battery_careful=80
battery_warning=90
battery_critical=95
# Fan speed threshold in RPM
#fan_speed_careful=100
# Sensors alias
@ -459,12 +399,6 @@ disable=False
# Should be one of the following:
# cpu_percent, memory_percent, io_counters, name, cpu_times, username
#sort_key=memory_percent
# List of stats to disable (not grabbed and not displayed)
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that cannot be disabled: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -481,20 +415,13 @@ mem_critical=90
nice_warning=-20,-19,-18,-17,-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19
#
# Nice: Example 2, low priority processes escalate from careful to critical
#nice_ok=0
#nice_careful=1,2,3,4,5,6,7,8,9
#nice_warning=10,11,12,13,14
#nice_critical=15,16,17,18,19
#
# Status: define threshold regarding the process status (first letter of process status)
# R: Running, S: Sleeping, Z: Zombie (complete list here https://psutil.readthedocs.io/en/latest/#process-status-constants)
status_ok=R,W,P,I
status_critical=Z,D
# Define the list of processes to export using:
# a comma-separated list of Glances filters
#export=.*firefox.*,pid:1234
# Define a list of processes to focus on (comma-separated list of Glances filters)
#focus=.*firefox.*,.*python.*
[ports]
disable=False
@ -546,8 +473,7 @@ port_default_gateway=False
disable=True
# Define the maximum VM name size (default is 20 chars)
max_name_size=20
# By default, Glances only displays running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# By default, Glances only displays running VMs with states: 'Running', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regardless of their states
all=False
@ -561,11 +487,8 @@ disable=False
; hide=telegraf
# Define the maximum docker name size (default is 20 chars)
max_name_size=20
# List of stats to disable (not displayed)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
# Thresholds for CPU and MEM (in %)
; cpu_warning=70
; cpu_critical=90
; mem_careful=20
@ -608,21 +531,15 @@ disable=False
# You can also add stats with key, like sensors:value:Ambient (key is case sensitive)
#columns=system:hr_name,load:min5,cpu:total,mem:percent,memswap:percent,sensors:value:Ambient,sensors:value:Composite
# Define the static servers list
# _protocol can be: rpc (default if not defined) or rest
# List is limited to 256 servers max (1 to 256)
#server_1_name=localhost
#server_1_alias=Local WebUI
#server_1_port=61266
#server_1_protocol=rest
#server_1_alias=My local PC
#server_1_port=61209
#server_2_name=localhost
#server_2_alias=My local PC
#server_2_port=61209
#server_2_protocol=rpc
#server_2_port=61235
#server_3_name=192.168.0.17
#server_3_alias=Another PC on my network
#server_3_port=61209
#server_1_protocol=rpc
#server_4_name=notagooddefinition
#server_4_name=pasbon
#server_4_port=61237
[passwords]
@ -641,16 +558,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created
# Can be overwritten by the --graph-path command line option
path=/tmp/glances
path=/tmp
# It is possible to generate the graphs automatically by setting the
# generate_every key to a non-zero value corresponding to the number of seconds between
# two generations. Set it to 0 to disable graph auto-generation.
@ -664,7 +576,7 @@ style=DarkStyle
[influxdb]
# !!!
# Will be DEPRECATED in future release.
# Please have a look at the new influxdb3 export module
# Please have a look at the new influxdb2 export module (compatible with InfluxDB 1.8.x and 2.x)
# !!!
# Configuration for the --export influxdb option
# https://influxdb.com/
@ -693,28 +605,7 @@ port=8086
protocol=http
org=nicolargo
bucket=glances
token=PUT_YOUR_INFLUXDB2_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
# Prefix will be added for all measurement name
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as a tag
#tags=foo:bar,spam:eggs,domain:`domainname`
[influxdb3]
# Configuration for the --export influxdb3 option
# https://influxdb.com/
host=http://localhost:8181
org=nicolargo
database=glances
token=PUT_YOUR_INFLUXDB3_TOKEN_HERE
token=EjFUTWe8U-MIseEAkaVIgVnej_TrnbdvEcRkaB1imstW7gapSqy6_6-8XD-yd51V0zUUpDy-kAdVD1purDLuxA==
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
@ -880,26 +771,6 @@ prefix=glances
# By default, system_name = FQDN
#system_name=mycomputer
[timescaledb]
# Configuration for the --export timescaledb option
# https://www.timescale.com/
host=localhost
port=5432
db=glances
user=postgres
password=password
# Overwrite device name (default is the FQDN)
# Most of the time, you should not overwrite this value
#hostname=mycomputer
[nats]
# Configuration for the --export nats option
# https://nats.io/
# Host is a comma-separated list of NATS nodes
host=nats://localhost:4222
# Prefix for the subjects (default is 'glances')
prefix=glances
##############################################################################
# AMPS
# * enable: Enable (true) or disable (false) the AMP

View File

@ -9,12 +9,13 @@
# WARNING: the Alpine image version and Python version should be set.
# Alpine 3.18 tag is a link to the latest 3.18.x version.
# Be aware that if you change the Alpine version, you may have to change the Python version.
ARG IMAGE_VERSION=3.23
ARG IMAGE_VERSION=3.20
ARG PYTHON_VERSION=3.12
##############################################################################
# Base layer to be used for building dependencies and the release images
FROM alpine:${IMAGE_VERSION} AS base
FROM alpine:${IMAGE_VERSION} as base
# Upgrade the system
RUN apk update \
@ -34,7 +35,7 @@ RUN apk add --no-cache \
# BUILD Stages
##############################################################################
# BUILD: Base image shared by all build images
FROM base AS build
FROM base as build
ARG PYTHON_VERSION
RUN apk add --no-cache \
@ -55,27 +56,31 @@ RUN apk add --no-cache \
pkgconfig \
libffi-dev \
openssl-dev \
cmake
# for cmake: Issue: https://github.com/nicolargo/glances/issues/2735
cmake # Issue: https://github.com/nicolargo/glances/issues/2735
RUN python${PYTHON_VERSION} -m venv venv-build
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv venv-build
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv --without-pip venv
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
FROM build AS buildminimal
FROM build as buildMinimal
ARG PYTHON_VERSION
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r docker-requirements.txt
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
##############################################################################
# BUILD: Install all the deps
FROM build AS buildfull
FROM build as buildFull
ARG PYTHON_VERSION
# Required for optional dependency cassandra-driver
@ -84,13 +89,14 @@ ARG CASS_DRIVER_NO_CYTHON=1
ARG CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r all-requirements.txt
-r requirements.txt \
-r optional-requirements.txt
##############################################################################
# RELEASE Stages
##############################################################################
# Base image shared by all releases
FROM base AS release
FROM base as release
ARG PYTHON_VERSION
# Copy source code and config file
@ -102,48 +108,40 @@ COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# Copy binary and update PATH
COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# EXPOSE PORT (XMLRPC / WebUI)
EXPOSE 61209 61208
# Add glances user
# RUN addgroup -g 1000 glances && \
# adduser -D -u 1000 -G glances glances && \
# chown -R glances:glances /app
# Define default command.
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD /venv/bin/python3 -m glances $GLANCES_OPT
################################################################################
# RELEASE: minimal
FROM release AS minimal
FROM release as minimal
COPY --from=buildminimal /venv /venv
# USER glances
COPY --from=buildMinimal /venv /venv
################################################################################
# RELEASE: full
FROM release AS full
FROM release as full
RUN apk add --no-cache libzmq
COPY --from=buildfull /venv /venv
# USER glances
COPY --from=buildFull /venv /venv
################################################################################
# RELEASE: dev - to be compatible with CI
FROM full AS dev
FROM full as dev
# Add the specific logger configuration file for Docker dev
# All logs will be forwarded to stdout
COPY ./docker-files/docker-logger.json /app
ENV LOG_CFG=/app/docker-logger.json
# USER glances
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD /venv/bin/python3 -m glances $GLANCES_OPT

View File

@ -1,24 +1,22 @@
{
"version": 1,
"disable_existing_loggers": "False",
"root": { "level": "INFO", "handlers": ["console"] },
"formatters": {
"standard": { "format": "%(asctime)s -- %(levelname)s -- %(message)s" },
"short": { "format": "%(levelname)s -- %(message)s" },
"long": {
"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"
},
"free": { "format": "%(message)s" }
},
"handlers": {
"console": { "class": "logging.StreamHandler", "formatter": "standard" }
},
"loggers": {
"debug": { "handlers": ["console"], "level": "DEBUG" },
"verbose": { "handlers": ["console"], "level": "INFO" },
"standard": { "handlers": ["console"], "level": "INFO" },
"requests": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch.trace": { "handlers": ["console"], "level": "ERROR" }
}
}
"version": 1,
"disable_existing_loggers": "False",
"root": {"level": "INFO", "handlers": ["console"]},
"formatters": {
"standard": {"format": "%(asctime)s -- %(levelname)s -- %(message)s"},
"short": {"format": "%(levelname)s -- %(message)s"},
"long": {"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"},
"free": {"format": "%(message)s"}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "standard"}
},
"loggers": {
"debug": {"handlers": ["console"], "level": "DEBUG"},
"verbose": {"handlers": ["console"], "level": "INFO"},
"standard": {"handlers": ["console"], "level": "INFO"},
"requests": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch.trace": {"handlers": ["console"], "level": "ERROR"}
}
}
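This logger configuration follows the standard Python logging.config dictionary schema, so it can be loaded with the standard library alone. A minimal sketch (LOG_CFG is set by the Dockerfiles below; exactly how Glances consumes it is an assumption here):

# Minimal sketch: load the JSON configuration above with the standard library.
# LOG_CFG is set by the Dockerfiles; how Glances reads it is assumed here.
import json
import logging
import logging.config
import os

with open(os.environ.get("LOG_CFG", "/app/docker-logger.json")) as f:
    logging.config.dictConfig(json.load(f))
logging.getLogger("standard").info("forwarded to stdout")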

View File

@ -13,7 +13,7 @@ ARG PYTHON_VERSION=3.12
##############################################################################
# Base layer to be used for building dependencies and the release images
FROM ubuntu:${IMAGE_VERSION} AS base
FROM ubuntu:${IMAGE_VERSION} as base
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get update \
@ -32,7 +32,7 @@ RUN apt-get update \
# BUILD Stages
##############################################################################
# BUILD: Base image shared by all build images
FROM base AS build
FROM base as build
ARG PYTHON_VERSION
ARG DEBIAN_FRONTEND=noninteractive
@ -55,29 +55,32 @@ RUN apt-get clean \
RUN python3 -m venv --without-pip venv
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
FROM build AS buildminimal
FROM build as buildMinimal
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r docker-requirements.txt
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
##############################################################################
# BUILD: Install all the deps
FROM build AS buildfull
FROM build as buildFull
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r all-requirements.txt
-r requirements.txt \
-r optional-requirements.txt
##############################################################################
# RELEASE Stages
##############################################################################
# Base image shared by all releases
FROM base AS release
FROM base as release
ARG PYTHON_VERSION
# Copy Glances source code and config file
@ -89,34 +92,28 @@ COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# Copy binary and update PATH
COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# EXPOSE PORT (XMLRPC / WebUI)
EXPOSE 61209 61208
# Add glances user
# NOTE: If used, the Glances Docker plugin does not work...
# UID and GID 1000 are already configured for the ubuntu user
# Create a new one with UID and GID 1001
# RUN groupadd -g 1001 glances && \
# useradd -u 1001 -g glances glances && \
# chown -R glances:glances /app
# Define default command.
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD /venv/bin/python3 -m glances $GLANCES_OPT
################################################################################
# RELEASE: minimal
FROM release AS minimal
FROM release as minimal
ARG PYTHON_VERSION
COPY --from=buildMinimal /venv /venv
# USER glances
################################################################################
# RELEASE: full
FROM release AS full
FROM release as full
ARG PYTHON_VERSION
RUN apt-get update \
@ -124,13 +121,11 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
COPY --from=buildfull /venv /venv
# USER glances
COPY --from=buildFull /venv /venv
################################################################################
# RELEASE: dev - to be compatible with CI
FROM full AS dev
FROM full as dev
ARG PYTHON_VERSION
# Add the specific logger configuration file for Docker dev
@ -138,8 +133,5 @@ ARG PYTHON_VERSION
COPY ./docker-files/docker-logger.json /app
ENV LOG_CFG=/app/docker-logger.json
# USER glances
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD /venv/bin/python3 -m glances $GLANCES_OPT

View File

@ -1,83 +1,10 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
annotated-doc==0.0.4
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via starlette
certifi==2025.11.12
# via requests
charset-normalizer==3.4.4
# via requests
click==8.1.8
# via uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
defusedxml==0.7.1
# via glances
docker==7.1.0
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.128.0
# via glances
h11==0.16.0
# via uvicorn
idna==3.11
# via
# anyio
# requests
jinja2==3.1.6
# via glances
markupsafe==3.0.3
# via jinja2
packaging==25.0
# via glances
podman==5.6.0
# via glances
psutil==7.2.1
# via glances
pydantic==2.12.5
# via fastapi
pydantic-core==2.41.5
# via pydantic
python-dateutil==2.9.0.post0
# via glances
pywin32==311 ; sys_platform == 'win32'
# via docker
requests==2.32.5
# via
# docker
# glances
# podman
shtab==1.8.0 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# python-dateutil
starlette==0.50.0
# via fastapi
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# fastapi
# pydantic
# pydantic-core
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
urllib3==2.6.2
# via
# docker
# podman
# requests
uvicorn==0.40.0
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
# install with base requirements file
-r requirements.txt
docker>=6.1.1
orjson # JSON Serialization speedup
podman
python-dateutil
requests
six
urllib3

View File

@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = ../.venv/bin/sphinx-build
SPHINXBUILD = ../venv-dev/bin/sphinx-build
PAPER =
BUILDDIR = _build

File diff suppressed because it is too large

Binary image changed: 76 KiB → 91 KiB

File diff suppressed because one or more lines are too long

Binary image changed: 203 KiB → 112 KiB

Binary file not shown (34 KiB → 33 KiB)

Binary file not shown (33 KiB → 31 KiB)

File diff suppressed because one or more lines are too long

Binary file not shown (83 KiB image removed)

View File

@ -3,7 +3,7 @@
Actions
=======
Glances can trigger actions on events for warning and critical thresholds.
Glances can trigger actions on events.
By ``action``, we mean any shell command line. For example, if you want
to execute the ``foo.py`` script if the last 5 minutes load is critical
@ -18,13 +18,6 @@ then add the ``_action`` line to the Glances configuration file:
All the stats are available in the command line through the use of the
`Mustache`_ syntax. `Chevron`_ is required to render the Mustache template syntax.
In addition to the stats of the current plugin, the following variables are
also available (a rendering sketch follows the list):
- ``{{time}}``: current time in ISO format
- ``{{critical}}``: critical threshold value
- ``{{warning}}``: warning threshold value
- ``{{careful}}``: careful threshold value
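Since `Chevron`_ is a regular Python package, the rendering step can be
reproduced on its own. A minimal sketch (the stats values below are made up
for illustration):

.. code-block:: python

    # Minimal sketch: render an action command line with Chevron.
    # The stats values are made-up examples, not values produced by Glances.
    import chevron

    action = 'echo "{{time}} {{mnt_point}} {{used}}/{{size}}" > /tmp/fs.alert'
    stats = {'time': '2025-01-01T12:00:00', 'mnt_point': '/', 'used': 70, 'size': 100}
    print(chevron.render(action, stats))
    # echo "2025-01-01T12:00:00 / 70/100" > /tmp/fs.alert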
Another example would be to create a log file
containing used vs total disk space if a space trigger warning is
reached:
@ -33,7 +26,7 @@ reached:
[fs]
warning=70
warning_action=echo "{{time}} {{mnt_point}} {{used}}/{{size}}" > /tmp/fs.alert
warning_action=echo {{mnt_point}} {{used}}/{{size}} > /tmp/fs.alert
A last example would be to create a log file containing the total user disk
space usage for a device and notify by email each time a space trigger
@ -43,11 +36,13 @@ critical is reached:
[fs]
critical=90
critical_action_repeat=echo "{{time}} {{device_name}} {{percent}}" > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
critical_action_repeat=echo {{device_name}} {{percent}} > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
.. note::
Use ``&&`` as a separator for multiple commands
Within ``/etc/glances/actions.d/fs-critical.py``:
.. code-block:: python
@ -68,7 +63,7 @@ Within ``/etc/glances/actions.d/fs-critical.py``:
.. note::
You can use all the stats for the current plugin. See
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
for the stats list.
It is also possible to repeat action until the end of the alert.
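Scripts referenced by an action can be any executable. For instance, a
hypothetical minimal variant of such a script (not the ``fs-critical.py``
from the documentation, whose body is elided above) could simply forward
the alert to syslog:

.. code-block:: python

    # Hypothetical minimal action script: forward the alert written by the
    # action command to syslog. Unix-only (uses the syslog module).
    import syslog
    from pathlib import Path

    alert = Path('/tmp/fs.alert').read_text().strip()
    syslog.syslog(syslog.LOG_CRIT, f'Glances filesystem alert: {alert}')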

View File

@ -31,9 +31,6 @@ under the ``[containers]`` section:
#show=showthisone,andthose.*
# Define the maximum container name size (default is 20 chars)
max_name_size=20
# List of stats to disable (not displayed)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Global containers' thresholds for CPU and MEM (in %)
cpu_careful=50
cpu_warning=70

View File

@ -5,16 +5,21 @@ Disk I/O
.. image:: ../_static/diskio.png
Glances displays the disk I/O throughput, count and mean latency:
Glances displays the disk I/O throughput. The unit is adapted
dynamically.
You can display:
- bytes per second (default behavior / Bytes/s, KBytes/s, MBytes/s, etc)
- requests per second (using --diskio-iops option or *B* hotkey)
- mean latency (using --diskio-latency option or *L* hotkey)
It's also possible to define:
There is no alert on this information.
It's possible to define:
- a list of disks to show (white list)
- a list of disks to hide
- aliases for disk name (use \ to escape special characters)
- aliases for disk name
under the ``[diskio]`` section in the configuration file.
@ -37,30 +42,6 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
It is also possible to define thresholds for latency and for bytes read and written per second:
.. code-block:: ini
[diskio]
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
dm-0_rx_careful=4000000000
dm-0_rx_warning=5000000000
dm-0_rx_critical=6000000000
dm-0_rx_log=True
dm-0_tx_careful=700000000
dm-0_tx_warning=900000000
dm-0_tx_critical=1000000000
dm-0_tx_log=True
You can also automatically hide disks with no read or write using the
``hide_zero`` configuration key. The optional ``hide_threshold_bytes`` option
can also be used to set a threshold higher than zero.

View File

@ -35,11 +35,6 @@ system:
[fs]
allow=shm
With the above configuration key, it is also possible to monitor NFS
mount points (allow=nfs). Be aware that this can slow down the
performance of the plugin if the NFS server is not reachable. In this
case, the plugin will wait for a 2-second timeout.
Also, you can hide mount points using regular expressions.
To hide all mount points starting with /boot and /snap:

View File

@ -41,7 +41,6 @@ Legend:
hddtemp
ps
containers
vms
amps
events
actions

View File

@ -27,7 +27,7 @@ Stats description:
is in RAM.
- **inactive**: (UNIX): memory that is marked as not used.
- **buffers**: (Linux, BSD): cache for things like file system metadata.
- **cached**: (Linux, BSD): cache for various things (including ZFS cache).
- **cached**: (Linux, BSD): cache for various things.
Additional stats available through the API:
@ -41,10 +41,6 @@ Additional stats available in through the API:
- **shared**: (BSD): memory that may be simultaneously accessed by multiple
processes.
It is possible to display the available memory instead of the used memory
by setting the ``available`` option to ``True`` in the configuration file
under the ``[mem]`` section.
A character is also displayed just after the MEM header and shows the
trend value:

View File

@ -20,7 +20,7 @@ Additionally, you can define:
- automatically hide interfaces not up
- automatically hide interfaces without IP address
- per-interface limit values
- aliases for interface name (use \ to escape special characters)
- aliases for interface name
The configuration should be done in the ``[network]`` section of the
Glances configuration file.
@ -66,13 +66,13 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
You can also automatically hide interfaces with no traffic using the
``hide_zero`` configuration key. The optional ``hide_threshold_bytes`` option
can also be used to set a threshold higher than zero.
.. code-block:: ini
[network]
[diskio]
hide_zero=True
hide_threshold_bytes=0

View File

@ -102,8 +102,7 @@ Columns display
``CPU%`` % of CPU used by the process
If Irix/Solaris mode is off ('0' key), the value
is divided by logical core number (the column
name becomes CPUi; see the worked example after this table)
is divided by logical core number
``MEM%`` % of MEM used by the process (RES divided by
the total RAM you have)
``VIRT`` Virtual Memory Size
@ -124,7 +123,6 @@ Columns display
The non-swapped physical memory a process is
using (what's currently in the physical memory).
``PID`` Process ID (column is replaced by NPROCS in accumulated mode)
``NPROCS`` Number of processes + children (only in accumulated mode)
``USER`` User ID
``THR`` Threads number of the process
``TIME+`` Cumulative CPU time used by the process
@ -149,24 +147,12 @@ Columns display
pressing on the ``'/'`` key
========================= ==============================================
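For example, with Irix mode off, a process consuming 160% of CPU (as summed
over all cores) on a machine with 8 logical cores is displayed as 20%. The
numbers below are made up for illustration:

.. code-block:: python

    # Irix mode off: per-process CPU% is divided by the logical core count.
    cpu_percent = 160.0   # as reported with Irix mode on (sum over cores)
    logical_cores = 8
    print(cpu_percent / logical_cores)  # 20.0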
Disable display of virtual memory
---------------------------------
It's possible to disable the display of the VIRT column (virtual memory) by adding the
``disable_virtual_memory=True`` option in the ``[processlist]`` section of the configuration
file (glances.conf):
.. code-block:: ini
[processlist]
disable_virtual_memory=True
Process filtering
-----------------
It's possible to filter the processes list using the ``ENTER`` key.
Glances filter syntax is the following (examples; an illustrative matcher is sketched after this list):
Filter syntax is the following (examples):
- ``python``: Filter processes name or command line starting with
*python* (regexp)
@ -175,25 +161,6 @@ Glances filter syntax is the following (examples):
- ``username:nicolargo``: Processes of nicolargo user (key:regexp)
- ``cmdline:\/usr\/bin.*``: Processes starting by */usr/bin*
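To make the ``key:regexp`` form concrete, here is an illustrative matcher
(a sketch only, not Glances' actual implementation):

.. code-block:: python

    # Illustrative only: match a Glances-style filter against a process dict.
    import re

    def match_filter(flt: str, proc: dict) -> bool:
        key, sep, pattern = flt.partition(':')
        if not sep:  # no key given: the regexp applies to the process name
            key, pattern = 'name', flt
        return re.match(pattern, str(proc.get(key, ''))) is not None

    print(match_filter('username:nicolargo', {'username': 'nicolargo'}))  # True
    print(match_filter('python', {'name': 'python3'}))                    # True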
Process focus
-------------
It's also possible to select a list of processes to focus on.
A list of Glances filters (see above) can be defined from the command line:
.. code-block:: bash
glances --process-focus .*python.*,.*firefox.*
or the glances.conf file:
.. code-block:: ini
[processlist]
focus=.*python.*,.*firefox.*
Extended info
-------------

View File

@ -13,45 +13,29 @@ Glances can display the sensors information using ``psutil``,
- hard disk temperature
- battery capacity
Limit values and sensors alias names can be defined in the configuration
file under the ``[sensors]`` section.
Limits can be defined for a specific sensor, for a type of sensor, or taken from the system
thresholds (the default behavior); the lookup order is sketched after the example below.
.. code-block:: ini
[sensors]
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
temperature_core_Ambient_careful=45
temperature_core_Ambient_warning=65
temperature_core_Ambient_critical=80
temperature_core_Ambient_log=False
# Overwrite thresholds for a specific type of sensor
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
#alias=temp1:Motherboard 0,core 0:CPU Core 0
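The resolution order described above can be sketched as follows (illustrative
only, not the actual Glances code): the key for the specific sensor wins over
the key for the sensor type, which wins over the system default.

.. code-block:: python

    # Illustrative threshold resolution: specific sensor, then sensor type,
    # then the system-provided default. Not the actual Glances implementation.
    def threshold(conf, sensor_type, name, level, system_default):
        for key in (f'{sensor_type}_{name}_{level}', f'{sensor_type}_{level}'):
            if key in conf:
                return conf[key]
        return system_default

    conf = {'temperature_core_Ambient_warning': 65.0}
    print(threshold(conf, 'temperature_core', 'Ambient', 'warning', 70.0))  # 65.0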
There is no alert on this information.
.. note 1::
Limit values and sensors alias names can be defined in the
configuration file under the ``[sensors]`` section.
.. note 2::
The support for multiple batteries is only available if
you have the batinfo Python lib installed on your system
because for the moment psutil only supports one battery.
.. note 2::
.. note 3::
If a sensor has temperature and fan speed with the same unit name,
it is possible to alias it using:
alias=unitname_temperature_core_alias:Alias for temp,unitname_fan_speed_alias:Alias for fan speed
.. note 3::
.. note 4::
If a sensor has multiple identical feature names (see #2280), then
Glances will add a suffix to the feature name.
For example, if you have one sensor with two Composite features, the
second one will be named Composite_1.
.. note 4::
.. note 5::
The plugin could crash on some operating systems (FreeBSD) with the
TCP or UDP blackhole option > 0 (see issue #2106). In this case, you
should disable the sensors (--disable-plugin sensors or from the

View File

@ -22,33 +22,3 @@ How to read the information:
.. warning::
This plugin needs administrator rights. Please run Glances as root/admin.
Also, you can hide drivers using regular expressions.
To hide a device, use the hide option:
.. code-block:: ini
[smart]
hide=.*Hide_this_device.*
It is also possible to configure a white list of devices to display.
Example to show only the specified drive:
.. code-block:: ini
[smart]
show=.*Show_this_device.*
Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
.. _regex101: https://regex101.com/
You can also hide attributes, for example Self-tests, Errors, etc. Use a comma-separated list.
.. code-block:: ini
[smart]
hide_attributes=attribute_name1,attribute_name2

View File

@ -1,36 +0,0 @@
.. _vms:
VMs
===
The Glances ``vms`` plugin is designed to display stats about VMs running on the host.
It currently supports two engines: `Multipass`_ and `Virsh`_.
No Python dependency is needed, but the Multipass and Virsh binaries should be available:
- multipass should be executable from /snap/bin/multipass
- virsh should be executable from /usr/bin/virsh
Note: CPU information is not available for Multipass VMs. Load is not available for Virsh VMs.
Configuration file options:
.. code-block:: ini
[vms]
disable=True
# Define the maximum VM name size (default is 20 chars)
max_name_size=20
# By default, Glances only displays running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regardless of their states
all=False
You can use all the variables ({{foo}}) available in the containers plugin.
Filtering (for hide or show) is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
.. _Multipass: https://canonical.com/multipass
.. _Virsh: https://www.libvirt.org/manpages/virsh.html
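For reference, stats can be gathered from those binaries with a plain
subprocess call; an illustrative sketch (not the actual Glances code, and it
assumes libvirt's ``virsh`` is installed at the path above):

.. code-block:: python

    # Illustrative only: list running VM names by calling the virsh binary.
    import subprocess

    out = subprocess.run(['/usr/bin/virsh', 'list', '--name'],
                         capture_output=True, text=True, check=True)
    print([name for name in out.stdout.splitlines() if name])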

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View File

@ -1,5 +1,2 @@
#!/bin/sh
make clean
make html
LC_ALL=C make man

View File

@ -18,10 +18,6 @@ Command-Line Options
enable debug mode
.. option:: --print-completion
generate shell tab completion scripts for Glances CLI
.. option:: -C CONF_FILE, --config CONF_FILE
path to the configuration file
@ -124,8 +120,7 @@ Command-Line Options
.. option:: --browser
start TUI Central Glances Browser
use --browser -w to start WebUI Central Glances Browser
start the client browser (list of servers)
.. option:: --disable-autodiscover
@ -352,7 +347,7 @@ The following commands (key pressed) are supported while in Glances:
Show/hide RAID plugin
``s``
Show/hide sensors plugin
Show/hide sensors stats
``S``
Enable/disable spark lines
@ -369,9 +364,6 @@ The following commands (key pressed) are supported while in Glances:
``U``
View cumulative network I/O
``V``
Show/hide VMS plugin
``w``
Delete finished warning log messages
@ -416,17 +408,11 @@ The following commands (key pressed) are supported while in Glances:
``F5`` or ``CTRL-R``
Refresh user interface
``SHIFT-LEFT``
``LEFT``
Navigation left through the process sort
``SHIFT-RIGHT``
Navigation right through the process sort
``LEFT``
Navigation left through the process name
``RIGHT``
Navigation right through the process name
Navigation right through the process sort
``UP``
Up in the processes list

View File

@ -58,7 +58,7 @@ try:
year = datetime.utcfromtimestamp(int(os.environ['SOURCE_DATE_EPOCH'])).year
except (KeyError, ValueError):
year = datetime.now().year
copyright = f'{year}, {author}'
copyright = '%d, %s' % (year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the

View File

@ -17,9 +17,9 @@ Location
You can place your ``glances.conf`` file in the following locations:
==================== =============================================================
``Linux``, ``SunOS`` ~/.config/glances/, /etc/glances/, /usr/share/doc/glances/
``*BSD`` ~/.config/glances/, /usr/local/etc/glances/, /usr/share/doc/glances/
``macOS`` ~/.config/glances/, ~/Library/Application Support/glances/, /usr/local/etc/glances/, /usr/share/doc/glances/
``Linux``, ``SunOS`` ~/.config/glances/, /etc/glances/, /usr/share/docs/glances/
``*BSD`` ~/.config/glances/, /usr/local/etc/glances/, /usr/share/docs/glances/
``macOS`` ~/.config/glances/, ~/Library/Application Support/glances/, /usr/local/etc/glances/, /usr/share/docs/glances/
``Windows`` %APPDATA%\\glances\\glances.conf
``All`` + <venv_root_folder>/share/doc/glances/
==================== =============================================================
@ -80,7 +80,7 @@ than a second one concerning the user interface:
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -95,10 +95,6 @@ than a second one concerning the user interface:
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
Each plugin, export module, and application monitoring process (AMP) can
have a section. Below is an example for the CPU plugin:

View File

@ -187,7 +187,7 @@ and make it visible to your container by adding it to ``docker-compose.yml`` as
image: nicolargo/glances:latest
restart: always
environment:
- "GLANCES_OPT=-w --password"
- GLANCES_OPT="-w --password"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Uncomment the below line if you want glances to display host OS detail instead of container's

View File

@ -3,51 +3,8 @@
F.A.Q
=====
Any encoding issue ?
--------------------
*Any encoding issue ?*
Try to run Glances with the following command line:
LANG=en_US.UTF-8 LC_ALL= glances
Container memory stats not displayed ?
--------------------------------------
On ARM64, Docker needs to be configured to allow access to the memory stats.
Edit the /boot/firmware/cmdline.txt and add the following configuration key:
cgroup_enable=memory
Netifaces issue ?
-----------------
Previously, Glances used Netifaces to get network interface information.
Now, Glances uses Netifaces2.
Please uninstall Netifaces and install Netifaces2 instead.
Extra note: Glances 4.5 or higher does not use Netifaces/Netifaces2 anymore.
On Debian/Ubuntu Operating Systems, Webserver display a blank screen ?
----------------------------------------------------------------------
For some reason, the Glances Debian/Ubuntu packages do not include the Web UI static files.
Please read: https://github.com/nicolargo/glances/issues/2021 for a workaround and more information.
Glances said that my computer has no free memory, is it normal ?
----------------------------------------------------------------
On Linux, Glances shows the free memory by default.
Free memory can be low; this is "normal" behavior because Linux uses free memory for disk caching
to improve performance. More information can be found here: https://linuxatemyram.com/.
If you want to display the "available" memory instead of the "free" memory, you can use
the following configuration key in the Glances configuration file (a quick ``psutil`` check is shown below):
[mem]
# Display available memory instead of used memory
available=True
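Both values can be checked quickly with ``psutil``, the library Glances relies
on for memory stats:

.. code-block:: python

    # Quick check of "free" vs "available" memory with psutil.
    import psutil

    vm = psutil.virtual_memory()
    print(f'free:      {vm.free / 1024**2:.0f} MiB')
    print(f'available: {vm.available / 1024**2:.0f} MiB')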

View File

@ -1,46 +0,0 @@
.. _fetch:
Fetch
=====
The fetch mode is used to get and share a quick look at a machine using the
``fetch`` option. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Some examples are provided in the ``conf/fetch-templates/`` directory. Please feel free to
customize them or create your own template (contributions via PR are welcome).
The format of the template is based on the Jinja2 templating engine and can use all the stats
available in Glances through the ``gl`` variable (an instance of the :ref:`Glances Python API<api>`).
For example, the default template is defined as:
.. code-block:: jinja
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
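Because the template is plain Jinja2, a custom template can be prototyped
outside Glances by stubbing the ``gl`` object (a minimal sketch; the stub
values below are made up):

.. code-block:: python

    # Prototype a custom fetch template outside Glances by stubbing `gl`.
    from jinja2 import Template

    class GlStub:
        system = {'hostname': 'myhost', 'hr_name': 'Ubuntu 24.04 64bit'}
        cpu = {'total': 12.3}

    tpl = Template("{{ gl.system['hostname'] }} | CPU {{ gl.cpu['total'] }}%")
    print(tpl.render(gl=GlStub()))  # myhost | CPU 12.3%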

View File

@ -34,17 +34,16 @@ CONFIGURATION
EXAMPLES
--------
Monitor local machine, also called standalone mode,
with the Text-based user interface (TUI):
Monitor local machine (standalone mode):
$ glances
To monitor the local machine with the Web user interface (WebUI),
To monitor the local machine with the web interface (Web UI),
run the following command line:
$ glances -w
then, open a Web Browser to the provided URL.
then, open a web browser to the provided URL.
Monitor local machine and export stats to a CSV file:
@ -72,16 +71,10 @@ Connect to a Glances server and export stats to a StatsD server:
$ glances -c <ip_server> --export statsd
Start the TUI Central Glances Browser:
Start the client browser (browser mode):
$ glances --browser
Start the WebUI Central Glances Browser (new in Glances 4.3 or higher):
$ glances --browser -w
If you do not want to see the local Glances Web Server in the browser list, please use the --disable-autodiscover option.
AUTHOR
------

View File

@ -1,110 +0,0 @@
.. _duckdb:
DuckDB
===========
DuckDB is an in-process SQL OLAP database management system.
You can export statistics to a ``DuckDB`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[duckdb]
# database defines where data are stored, can be one of:
# /path/to/glances.db (see https://duckdb.org/docs/stable/clients/python/dbapi#file-based-connection)
# :memory:glances (see https://duckdb.org/docs/stable/clients/python/dbapi#in-memory-connection)
# Or anyone else supported by the API (see https://duckdb.org/docs/stable/clients/python/dbapi)
database=/tmp/glances.db
and run Glances with:
.. code-block:: console
$ glances --export duckdb
Data model
-----------
The data model is composed of one table per Glances plugin.
Example:
.. code-block:: python
>>> import duckdb
>>> db = duckdb.connect(database='/tmp/glances.db', read_only=True)
>>> db.sql("SELECT * from cpu")
┌─────────────────────┬─────────────────┬────────┬────────┬────────┬───┬────────────────────┬─────────────────────┬──────────────────────┬──────────────────────┬──────────────────────┐
│ time │ hostname_id │ total │ user │ nice │ … │ cpu_iowait_warning │ cpu_iowait_critical │ cpu_ctx_switches_c… │ cpu_ctx_switches_w… │ cpu_ctx_switches_c… │
│ time with time zone │ varchar │ double │ double │ double │ │ double │ double │ double │ double │ double │
├─────────────────────┼─────────────────┼────────┼────────┼────────┼───┼────────────────────┼─────────────────────┼──────────────────────┼──────────────────────┼──────────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ 8.0 │ 5.6 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:27+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:29+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:31+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:33+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:35+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:37+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:39+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:41+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:43+00 │ nicolargo-xps15 │ 12.2 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:32+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:34+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:36+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:38+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:40+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:42+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:44+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:46+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:48+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
├─────────────────────┴─────────────────┴────────┴────────┴────────┴───┴────────────────────┴─────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┤
│ 41 rows (20 shown) 47 columns (10 shown) │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
>>> db.sql("SELECT * from cpu").fetchall()[0]
(datetime.time(11, 50, 25, tzinfo=datetime.timezone.utc), 'nicolargo-xps15', 8.0, 5.6, 0.0, 2.3, 91.9, 0.1, 0.0, 0.0, 0.0, 0, 0, 0, 0, 16, 2.4103684425354004, 90724823, 0, 63323797, 0, 30704572, 0, 0, 0, 1200.0, 65.0, 75.0, 85.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, 5.0, 5.625, 6.25, 640000.0, 720000.0, 800000.0)
>>> db.sql("SELECT * from network")
┌─────────────────────┬─────────────────┬────────────────┬────────────┬────────────┬───┬─────────────────────┬────────────────┬────────────────────┬────────────────────┬───────────────────┐
│ time │ hostname_id │ key_id │ bytes_sent │ bytes_recv │ … │ network_tx_critical │ network_hide │ network_hide_no_up │ network_hide_no_ip │ network_hide_zero │
│ time with time zone │ varchar │ varchar │ int64 │ int64 │ │ double │ varchar │ boolean │ boolean │ boolean │
├─────────────────────┼─────────────────┼────────────────┼────────────┼────────────┼───┼─────────────────────┼────────────────┼────────────────────┼────────────────────┼───────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ interface_name │ 407761 │ 32730 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:27+00 │ nicolargo-xps15 │ interface_name │ 2877 │ 4857 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:29+00 │ nicolargo-xps15 │ interface_name │ 44504 │ 32555 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:31+00 │ nicolargo-xps15 │ interface_name │ 1092285 │ 48600 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:33+00 │ nicolargo-xps15 │ interface_name │ 150119 │ 43805 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:35+00 │ nicolargo-xps15 │ interface_name │ 34424 │ 14825 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:37+00 │ nicolargo-xps15 │ interface_name │ 19382 │ 33614 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:39+00 │ nicolargo-xps15 │ interface_name │ 53060 │ 39780 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:41+00 │ nicolargo-xps15 │ interface_name │ 371914 │ 78626 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:43+00 │ nicolargo-xps15 │ interface_name │ 82356 │ 60612 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ interface_name │ 3766 │ 9977 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:32+00 │ nicolargo-xps15 │ interface_name │ 188036 │ 18668 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:34+00 │ nicolargo-xps15 │ interface_name │ 543 │ 2451 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:36+00 │ nicolargo-xps15 │ interface_name │ 8247 │ 7275 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:38+00 │ nicolargo-xps15 │ interface_name │ 7252 │ 986 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:40+00 │ nicolargo-xps15 │ interface_name │ 172 │ 132 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:42+00 │ nicolargo-xps15 │ interface_name │ 8080 │ 6640 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:44+00 │ nicolargo-xps15 │ interface_name │ 19660 │ 17830 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:46+00 │ nicolargo-xps15 │ interface_name │ 1007030 │ 84170 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:48+00 │ nicolargo-xps15 │ interface_name │ 128947 │ 18087 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
├─────────────────────┴─────────────────┴────────────────┴────────────┴────────────┴───┴─────────────────────┴────────────────┴────────────────────┴────────────────────┴───────────────────┤
│ 41 rows (20 shown) 28 columns (10 shown) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
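Because the export is a plain DuckDB database file, any SQL aggregation can be
run against it. As a small sketch (reusing the same ``/tmp/glances.db`` file as
above):

.. code-block:: python

    >>> import duckdb
    >>> db = duckdb.connect(database='/tmp/glances.db', read_only=True)
    >>> # Time range and average CPU load over the whole capture
    >>> db.sql("SELECT min(time), max(time), avg(total) AS avg_cpu FROM cpu").fetchone()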
.. _duckdb: https://duckdb.org/

View File

@ -3,24 +3,8 @@
Gateway To Other Services
=========================
Glances can export stats to files or to other services like databases, message queues, etc.
Each exporter has its own configuration options, which can be set in the Glances
configuration file (`glances.conf`).
A common options section is also available. For example, the `exclude_fields`
option allows you to specify fields that should not be exported:
.. code-block:: ini
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
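As a rough illustration of how such patterns filter field names (a sketch only;
the exact matching semantics used by the exporters are an assumption here):

.. code-block:: python

    import re

    # Patterns taken from the exclude_fields example above
    exclude = [r".*_critical", r".*_careful", r".*_warning", r".*\.key$"]

    def is_excluded(field):
        # Assumption: a field is dropped if any pattern matches its full name
        return any(re.fullmatch(pattern, field) for pattern in exclude)

    for field in ["user", "cpu_user_critical", "mem_careful", "network.key"]:
        print(field, "->", "excluded" if is_excluded(field) else "exported")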
This section describes the available exporters and how to configure them:
Glances can export stats to a CSV file. It can also act as a gateway
providing stats to multiple services (see the list below).
.. toctree::
:maxdepth: 2
@ -30,18 +14,15 @@ This section describes the available exporters and how to configure them:
couchdb
elastic
graph
graphite
influxdb
json
kafka
mqtt
mongodb
nats
opentsdb
prometheus
rabbitmq
restful
riemann
statsd
timescaledb
zeromq

View File

@ -17,19 +17,19 @@ Glances InfluxDB data model:
+---------------+-----------------------+-----------------------+
| Measurement | Fields | Tags |
+===============+=======================+=======================+
| cpu | user | hostname |
| | system | |
| | iowait... | |
+---------------+-----------------------+-----------------------+
| network       | rx                    | hostname              |
|               | tx                    | interface_name        |
|               | time_since_update...  |                       |
+---------------+-----------------------+-----------------------+
| diskio        | read_bytes            | hostname              |
|               | write_bytes           | disk_name             |
|               | time_since_update...  |                       |
+---------------+-----------------------+-----------------------+
| docker | cpu_percent | hostname |
| | memory_usage... | name |
@ -78,7 +78,7 @@ configuration file (no limit on columns number).
Note: if you want to use SSL, please set 'protocol=https'.
InfluxDB v2 (from InfluxDB v1.8.x/Flux and InfluxDB <v3.x)
InfluxDB v2 (from InfluxDB v1.8.x/Flux and InfluxDB v2.x)
---------------------------------------------------------
Note: The InfluxDB v2 client (https://pypi.org/project/influxdb-client/)
@ -90,14 +90,12 @@ following:
.. code-block:: ini
[influxdb2]
# Configuration for the --export influxdb2 option
# https://influxdb.com/
host=localhost
port=8086
protocol=http
org=nicolargo
bucket=glances
token=PUT_YOUR_INFLUXDB2_TOKEN_HERE
token=EjFUTWe8U-MIseEAkaVIgVnej_TrnbdvEcRkaB1imstW7gapSqy6_6-8XD-yd51V0zUUpDy-kAdVD1purDLuxA==
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
@ -109,7 +107,7 @@ following:
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as a tag
# Note: hostname is always added as a tag
#tags=foo:bar,spam:eggs,domain:`domainname`
and run Glances with:
@ -120,46 +118,6 @@ and run Glances with:
Note: if you want to use SSL, please set 'protocol=https'.
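To quickly check that stats are arriving, the bucket can be queried back with
the same influxdb-client package (a minimal sketch; the URL, org, bucket and
token are assumed to match the configuration example above):

.. code-block:: python

    from influxdb_client import InfluxDBClient

    client = InfluxDBClient(url="http://localhost:8086", org="nicolargo",
                            token="PUT_YOUR_INFLUXDB2_TOKEN_HERE")
    flux = '''from(bucket: "glances")
        |> range(start: -5m)
        |> filter(fn: (r) => r._measurement == "cpu")'''
    for table in client.query_api().query(flux):
        for record in table.records:
            print(record.get_time(), record.get_field(), record.get_value())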
InfluxDB v3 (for InfluxDB 3.x)
------------------------------
Note: The InfluxDB v3 client (https://pypi.org/project/influxdb3-python/)
is only available for Python 3.8 or higher.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[influxdb3]
# Configuration for the --export influxdb3 option
# https://influxdb.com/
host=http://localhost:8181
org=nicolargo
database=glances
token=PUT_YOUR_INFLUXDB3_TOKEN_HERE
# Set the interval between two exports (in seconds)
# If the interval is set to 0, the Glances refresh time is used (default behavior)
#interval=0
# Prefix will be added for all measurement name
# Ex: prefix=foo
# => foo.cpu
# => foo.mem
# You can also use dynamic values
#prefix=foo
# Following tags will be added for all measurements
# You can also use dynamic values.
# Note: hostname and name (for process) are always added as a tag
#tags=foo:bar,spam:eggs,domain:`domainname`
and run Glances with:
.. code-block:: console
$ glances --export influxdb3
Note: if you want to use SSL, please set host with 'https' scheme instead of 'http'.
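Similarly, the exported measurements can be read back with the influxdb3-python
client using plain SQL (a sketch under the same assumptions as the
configuration above):

.. code-block:: python

    from influxdb_client_3 import InfluxDBClient3

    client = InfluxDBClient3(host="http://localhost:8181",
                             database="glances",
                             token="PUT_YOUR_INFLUXDB3_TOKEN_HERE")
    # query() returns a PyArrow table
    table = client.query("SELECT * FROM cpu ORDER BY time DESC LIMIT 5")
    print(table.to_pandas())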
Grafana
-------

View File

@ -1,68 +0,0 @@
.. _nats:
NATS
====
NATS is a message broker.
You can export statistics to a ``NATS`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[nats]
host=nats://localhost:4222
prefix=glances
and run Glances with:
.. code-block:: console
$ glances --export nats
Data model
-----------
Glances stats are published as JSON messages to the following subjects:
<prefix>.<plugin>
Example:
CPU stats are published to glances.cpu
So a simple Python client will subscribe to this subject with:
.. code-block:: python

    import asyncio

    import nats

    async def main():
        nc = nats.NATS()
        await nc.connect(servers=["nats://localhost:4222"])

        future = asyncio.Future()

        async def cb(msg):
            nonlocal future
            future.set_result(msg)

        await nc.subscribe("glances.cpu", cb=cb)

        # Wait for message to come in
        print("Waiting (max 30 seconds) for a message on 'glances' subject...")
        msg = await asyncio.wait_for(future, 30)
        print(msg.subject, msg.data)

    if __name__ == '__main__':
        asyncio.run(main())
To subscribe to all Glances stats, use a wildcard:

.. code-block:: python

    await nc.subscribe("glances.*", cb=cb)

View File

@ -1,48 +0,0 @@
.. _timescale:
TimeScaleDB
===========
TimescaleDB is a time-series database built on top of PostgreSQL.
You can export statistics to a ``TimescaleDB`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[timescaledb]
host=localhost
port=5432
db=glances
user=postgres
password=password
and run Glances with:
.. code-block:: console
$ glances --export timescaledb
Data model
-----------
Each plugin will create a `hypertable`_ in the TimescaleDB database.
Tables are partitioned by time (using the ``time`` column).
Tables are segmented by hostname (so that multiple hosts can be stored in the same Glances database).
For plugins with a key (for example network, where the key is the interface name), the key will
be added as a column in the table (named ``key_id``) and added to the ``timescaledb.segmentby`` option.
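Since the stats end up in regular PostgreSQL tables, they can be read back with
any PostgreSQL client. A minimal sketch with psycopg2 (connection values taken
from the configuration example above; the column names are assumptions based on
the notes below):

.. code-block:: python

    import psycopg2

    conn = psycopg2.connect(host="localhost", port=5432, dbname="glances",
                            user="postgres", password="password")
    with conn.cursor() as cur:
        # 'user' is exported as 'user_cpu' (reserved keyword in PostgreSQL)
        cur.execute("SELECT time, hostname, user_cpu FROM cpu "
                    "ORDER BY time DESC LIMIT 5")
        for row in cur.fetchall():
            print(row)
    conn.close()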
Current limitations
-------------------
Sensors, Fs and DiskIO plugins are not supported by the TimescaleDB exporter.
In the cpu plugin, the user field is exported as user_cpu (user_percpu in the percpu plugin)
because user is a reserved keyword in PostgreSQL.
.. _hypertable: https://docs.tigerdata.com/use-timescale/latest/hypertables/

View File

@ -11,11 +11,12 @@ information depending on the terminal size.
It can also work in client/server mode. Remote monitoring can be
done via terminal, Web interface, or API (XMLRPC and RESTful).
Stats can also be exported to :ref:`files or external databases<gw>`.
Glances is written in Python and uses the `psutil`_ library to get
information from your system.
It is also possible to use it in your own Python scripts thanks to
the :ref:`Glances API<api>` or in any other application through
the :ref:`RESTful API<api_restful>`.
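For example, a minimal sketch of the Python API (mirroring the notebook shipped
with the documentation):

.. code-block:: python

    from glances import api

    gl = api.GlancesAPI()
    print(gl.cpu.get('total'), gl.mem.get('percent'))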
Stats can also be exported to external time/value databases.
.. _psutil: https://github.com/giampaolo/psutil
Table of Contents
=================
@ -29,11 +30,7 @@ Table of Contents
config
aoa/index
gw/index
api/python
api/restful
api
docker
faq
support
.. _psutil: https://github.com/giampaolo/psutil

View File

@ -34,19 +34,5 @@ To upgrade Glances and all its dependencies to the latest versions:
For additional installation methods, read the official `README`_ file.
Shell tab completion
====================
Glances 4.3.2 and higher include shell tab autocompletion thanks to the ``--print-completion`` option.
For example, on a Linux operating system with Bash shell:
.. code-block:: console
$ glances --print-completion bash | sudo tee -a /etc/bash_completion.d/glances
$ source /etc/bash_completion.d/glances
The following shells are supported: bash, zsh and tcsh.
.. _psutil: https://github.com/giampaolo/psutil
.. _README: https://github.com/nicolargo/glances/blob/master/README.rst

View File

@ -1,4 +1,3 @@
'\" t
.\" Man page generated from reStructuredText.
.
.
@ -28,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.TH "GLANCES" "1" "Jan 03, 2026" "4.4.2_dev1" "Glances"
.TH "GLANCES" "1" "Nov 01, 2024" "4.2.1" "Glances"
.SH NAME
glances \- An eye on your system
.SH SYNOPSIS
@ -64,11 +63,6 @@ enable debug mode
.UNINDENT
.INDENT 0.0
.TP
.B \-\-print\-completion
generate shell tab completion scripts for Glances CLI
.UNINDENT
.INDENT 0.0
.TP
.B \-C CONF_FILE, \-\-config CONF_FILE
path to the configuration file
.UNINDENT
@ -195,8 +189,7 @@ run Glances in server mode
.INDENT 0.0
.TP
.B \-\-browser
start TUI Central Glances Browser
use browser \-w to start WebUI Central Glances Browser
start the client browser (list of servers)
.UNINDENT
.INDENT 0.0
.TP
@ -458,7 +451,7 @@ Reset history
Show/hide RAID plugin
.TP
.B \fBs\fP
Show/hide sensors plugin
Show/hide sensors stats
.TP
.B \fBS\fP
Enable/disable spark lines
@ -475,9 +468,6 @@ Sort processes by USER
.B \fBU\fP
View cumulative network I/O
.TP
.B \fBV\fP
Show/hide VMS plugin
.TP
.B \fBw\fP
Delete finished warning log messages
.TP
@ -522,17 +512,11 @@ Switch between process command line or command name
.B \fBF5\fP or \fBCTRL\-R\fP
Refresh user interface
.TP
.B \fBSHIFT\-LEFT\fP
.B \fBLEFT\fP
Navigation left through the process sort
.TP
.B \fBSHIFT\-RIGHT\fP
Navigation right through the process sort
.TP
.B \fBLEFT\fP
Navigation left through the process name
.TP
.B \fBRIGHT\fP
Navigation right through the process name
Navigation right through the process sort
.TP
.B \fBUP\fP
Up in the processes list
@ -574,24 +558,25 @@ A template is available in the \fB/usr{,/local}/share/doc/glances\fP
.sp
You can place your \fBglances.conf\fP file in the following locations:
.TS
box center;
l|l.
center;
|l|l|.
_
T{
\fBLinux\fP, \fBSunOS\fP
T} T{
~/.config/glances/, /etc/glances/, /usr/share/doc/glances/
~/.config/glances/, /etc/glances/, /usr/share/docs/glances/
T}
_
T{
\fB*BSD\fP
T} T{
~/.config/glances/, /usr/local/etc/glances/, /usr/share/doc/glances/
~/.config/glances/, /usr/local/etc/glances/, /usr/share/docs/glances/
T}
_
T{
\fBmacOS\fP
T} T{
~/.config/glances/, ~/Library/Application Support/glances/, /usr/local/etc/glances/, /usr/share/doc/glances/
~/.config/glances/, ~/Library/Application Support/glances/, /usr/local/etc/glances/, /usr/share/docs/glances/
T}
_
T{
@ -608,6 +593,7 @@ T} T{
<venv_root_folder>/share/doc/glances/
.UNINDENT
T}
_
.TE
.INDENT 0.0
.IP \(bu 2
@ -626,7 +612,8 @@ A first section (called global) is available:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[global]
# Refresh rate (default is a minimum of 2 seconds)
# Can be overwritten by the \-t <sec> option
@ -642,7 +629,8 @@ history_size=1200
# Define external directory for loading additional plugins
# The layout follows the glances standard for plugin definitions
#plugin_dir=/home/user/dev/plugins
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -650,7 +638,8 @@ than a second one concerning the user interface:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[outputs]
# Options for all UIs
#\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-
@ -672,7 +661,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross\-origin requests.
@ -687,11 +676,8 @@ max_processes_display=25
# Comma separated list of HTTP request headers that should be supported for cross\-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3\-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -700,7 +686,8 @@ have a section. Below is an example for the CPU plugin:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[cpu]
disable=False
refresh=3
@ -716,7 +703,8 @@ system_critical=90
steal_careful=50
steal_warning=70
steal_critical=90
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -724,7 +712,8 @@ an InfluxDB export module:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[influxdb]
# Configuration for the \-\-export influxdb option
# https://influxdb.com/
@ -735,7 +724,8 @@ password=root
db=glances
prefix=localhost
#tags=foo:bar,spam:eggs
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -743,7 +733,8 @@ or a Nginx AMP:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[amp_nginx]
# Nginx status page should be enabled (https://easyengine.io/tutorials/nginx/status\-page/)
enable=true
@ -751,7 +742,8 @@ regex=\e/usr\e/sbin\e/nginx
refresh=60
one_line=false
status_url=http://localhost/nginx_status
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -761,11 +753,13 @@ of an InfluxDB export to the current hostname, use:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[influxdb]
\&...
prefix=\(gahostname\(ga
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -773,11 +767,13 @@ Or if you want to add the Operating System name as a tag:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
[influxdb]
\&...
tags=system:\(gauname \-a\(ga
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.SH LOGGING
@ -800,7 +796,8 @@ format):
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
{
\(dqversion\(dq: 1,
\(dqdisable_existing_loggers\(dq: \(dqFalse\(dq,
@ -859,7 +856,8 @@ format):
}
}
}
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -867,9 +865,11 @@ and start Glances using the following command line:
.INDENT 0.0
.INDENT 3.5
.sp
.EX
.nf
.ft C
LOG_CFG=<path>/glances.json glances
.EE
.ft P
.fi
.UNINDENT
.UNINDENT
.sp
@ -882,15 +882,14 @@ is hosted.
.UNINDENT
.SH EXAMPLES
.sp
Monitor local machine, also called standalone mode,
with the Text\-based user interface (TUI):
Monitor local machine (standalone mode):
.INDENT 0.0
.INDENT 3.5
$ glances
.UNINDENT
.UNINDENT
.sp
To monitor the local machine with the Web user interface (WebUI),
To monitor the local machine with the web interface (Web UI),
run the following command line:
.INDENT 0.0
.INDENT 3.5
@ -898,7 +897,7 @@ $ glances \-w
.UNINDENT
.UNINDENT
.sp
then, open a Web Browser to the provided URL.
then, open a web browser to the provided URL.
.sp
Monitor local machine and export stats to a CSV file:
.INDENT 0.0
@ -944,25 +943,16 @@ $ glances \-c <ip_server> export statsd
.UNINDENT
.UNINDENT
.sp
Start the TUI Central Glances Browser:
Start the client browser (browser mode):
.INDENT 0.0
.INDENT 3.5
$ glances browser
.UNINDENT
.UNINDENT
.sp
Start the WebUI Central Glances Browser (new in Glances 4.3 or higher):
.INDENT 0.0
.INDENT 3.5
$ glances browser \-w
.UNINDENT
.UNINDENT
.sp
If you do not want to see the local Glances Web Server in the browser list, please use the disable\-autodiscover option.
.SH AUTHOR
.sp
Nicolas Hennion aka Nicolargo <\X'tty: link mailto:contact@nicolargo.com'\fI\%contact@nicolargo.com\fP\X'tty: link'>
.SH COPYRIGHT
2026, Nicolas Hennion
2024, Nicolas Hennion
.\" Generated by docutils manpage writer.
.

View File

@ -4,12 +4,11 @@ Quickstart
==========
This page gives a good introduction to how to get started with Glances.
Glances offers multiple modes:
Glances offers three modes:
- Standalone
- Client/Server
- Web server
- Fetch
Standalone Mode
---------------
@ -89,8 +88,8 @@ available network interfaces) and TCP port is ``61209``.
In client/server mode, limits are set by the server side.
Central Glances Browser
^^^^^^^^^^^^^^^^^^^^^^^
Central client
^^^^^^^^^^^^^^
.. image:: _static/browser.png
@ -117,30 +116,22 @@ Example:
Glances can also detect and display all Glances servers available on
your network via the ``zeroconf`` protocol (not available on Windows):
To start the TUI Central Glances Browser, use the following option:
To start the central client, use the following option:
.. code-block:: console
client$ glances --browser
.. note::
Use ``--disable-autodiscover`` to disable the auto-discovery mode.
When the list is displayed, you can navigate through the Glances servers with
the up/down keys. It is also possible to sort the servers using:

- '1' is normal (do not sort)
- '2' sorts in ascending order (ONLINE > SNMP > PROTECTED > OFFLINE > UNKNOWN)
- '3' sorts in descending order (UNKNOWN > OFFLINE > PROTECTED > SNMP > ONLINE)
To start the WebUI Central Glances Browser (new in Glances 4.3 or higher), use the following option:
.. code-block:: console
client$ glances --browser -w
Open the URL (/browser) and click on the server to display stats.
.. note::
Use ``--disable-autodiscover`` to disable the auto-discovery mode.
SNMP
^^^^
@ -197,7 +188,7 @@ Here's a screenshot from Chrome on Android:
.. image:: _static/screenshot-web2.png
How do you protect your server (or Web server) with a login/password?
----------------------------------------------------------------------
------------------------------------------------------------------
You can set a password to access the server using the ``--password``.
By default, the login is ``glances`` but you can change it with
@ -223,22 +214,3 @@ file:
# Additionally (and optionally) a default password could be defined
localhost=mylocalhostpassword
default=mydefaultpassword
Fetch mode
----------
It is also possible to get and share a quick look at a machine using the
``fetch`` mode. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Have a look at the :ref:`fetch documentation page<fetch>` to learn how to create your own template.

View File

@ -1,33 +0,0 @@
import json
from unittest.mock import patch

from fastapi.openapi.utils import get_openapi

from glances.main import GlancesMain

# sys.path.append('./glances/outputs')
from glances.outputs.glances_restful_api import GlancesRestfulApi

# Init Glances core
testargs = ["glances", "-C", "./conf/glances.conf"]
with patch('sys.argv', testargs):
    core = GlancesMain()
    test_config = core.get_config()
    test_args = core.get_args()

app = GlancesRestfulApi(config=test_config, args=test_args)._app

with open('./docs/api/openapi.json', 'w') as f:
    json.dump(
        get_openapi(
            title=app.title,
            version=app.version,
            # Set the OpenAPI version
            # It's a hack to make openapi.json compatible with tools like https://editor.swagger.io/
            # Please read https://fastapi.tiangolo.com/reference/fastapi/?h=openapi#fastapi.FastAPI.openapi_version
            openapi_version="3.0.2",
            description=app.description,
            routes=app.routes,
        ),
        f,
    )

View File

@ -1,13 +0,0 @@
import json

from glances.outputs.glances_curses import _GlancesCurses

print(
    json.dumps(
        {
            "topMenu": list(_GlancesCurses._top),
            "leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"],
        },
        indent=4,
    )
)

View File

@ -1,523 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "592b8135-c06b-41b7-895e-9dd70787f6ac",
"metadata": {},
"source": [
"# Use Glances API in your Python code"
]
},
{
"cell_type": "markdown",
"id": "e5ec86ae-ce2b-452f-b715-54e746026a96",
"metadata": {},
"source": [
"## Init the Glances API"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ba9b3546-65a0-4eec-942b-1855ff5c5d32",
"metadata": {},
"outputs": [],
"source": [
"from glances import api"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e81ad928-3b61-4654-8589-13cb29e7f292",
"metadata": {},
"outputs": [],
"source": [
"gl = api.GlancesAPI()"
]
},
{
"cell_type": "markdown",
"id": "6ec912a3-0875-4cdb-8539-e84ffb27768a",
"metadata": {},
"source": [
"## Get plugins list"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1ce57a13-a90d-4d65-b4a4-2bc45112697e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['alert',\n",
" 'ports',\n",
" 'diskio',\n",
" 'containers',\n",
" 'processcount',\n",
" 'programlist',\n",
" 'gpu',\n",
" 'percpu',\n",
" 'vms',\n",
" 'system',\n",
" 'network',\n",
" 'cpu',\n",
" 'amps',\n",
" 'processlist',\n",
" 'load',\n",
" 'sensors',\n",
" 'uptime',\n",
" 'now',\n",
" 'connections',\n",
" 'fs',\n",
" 'wifi',\n",
" 'ip',\n",
" 'help',\n",
" 'version',\n",
" 'psutilversion',\n",
" 'core',\n",
" 'mem',\n",
" 'folders',\n",
" 'quicklook',\n",
" 'memswap',\n",
" 'raid']"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.plugins()"
]
},
{
"cell_type": "markdown",
"id": "d5be2964-7a28-4b93-9dd0-1481afd2ee50",
"metadata": {},
"source": [
"## Get CPU stats"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d1636d2-3f3e-44d4-bb67-45487384f79f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 3.8, 'user': 3.0, 'nice': 0.0, 'system': 0.8, 'idle': 96.1, 'iowait': 0.1, 'irq': 0.0, 'steal': 0.0, 'guest': 0.0, 'ctx_switches': 0, 'interrupts': 0, 'soft_interrupts': 0, 'syscalls': 0, 'cpucore': 16, 'time_since_update': 141.46278643608093, 'ctx_switches_gauge': 12830371, 'ctx_switches_rate_per_sec': 0, 'interrupts_gauge': 9800040, 'interrupts_rate_per_sec': 0, 'soft_interrupts_gauge': 3875931, 'soft_interrupts_rate_per_sec': 0, 'syscalls_gauge': 0, 'syscalls_rate_per_sec': 0}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "99681a33-045e-43bf-927d-88b15872fad0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3.1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu.get('total')"
]
},
{
"cell_type": "markdown",
"id": "07e30de4-8f2a-4110-9c43-2a87d91dbf24",
"metadata": {},
"source": [
"## Get MEMORY stats"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "33502d93-acf9-49c5-8bcd-0a0404b47829",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 16422858752, 'available': 6726169136, 'percent': 59.0, 'used': 9696689616, 'free': 541847552, 'active': 8672595968, 'inactive': 5456875520, 'buffers': 354791424, 'cached': 6520318384, 'shared': 729960448}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "404cd8d6-ac38-4830-8ead-4b747e0ca7b1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"6779998768"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem.get('available')"
]
},
{
"cell_type": "markdown",
"id": "74e27e9f-3240-4827-a754-3538b7d68119",
"metadata": {},
"source": [
"Display it in a user friendly way:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "fa83b40a-51e8-45fa-b478-d0fcc9de4639",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6.28G'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.auto_unit(gl.mem.get('available'))"
]
},
{
"cell_type": "markdown",
"id": "bfaf5b94-7c9c-4fdc-8a91-71f543cafa4b",
"metadata": {},
"source": [
"## Get NETWORK stats"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "a0ab2ce7-e9bd-4a60-9b90-095a9023dac7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'wlp0s20f3': {'bytes_sent': 1130903, 'bytes_recv': 2213272, 'speed': 0, 'key': 'interface_name', 'interface_name': 'wlp0s20f3', 'alias': 'WIFI', 'bytes_all': 3344175, 'time_since_update': 354.35748958587646, 'bytes_recv_gauge': 1108380679, 'bytes_recv_rate_per_sec': 6245.0, 'bytes_sent_gauge': 21062113, 'bytes_sent_rate_per_sec': 3191.0, 'bytes_all_gauge': 1129442792, 'bytes_all_rate_per_sec': 9437.0}}"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network"
]
},
{
"cell_type": "markdown",
"id": "b65f7280-d9f0-4719-9e10-8b78dc414bae",
"metadata": {},
"source": [
"Get the list of networks interfaces:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "1a55d32a-bd7d-4dfa-b239-8875c01f205e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['wlp0s20f3']"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.keys()"
]
},
{
"cell_type": "markdown",
"id": "8c7e0215-e96a-4f7e-a187-9b7bee1abcf9",
"metadata": {},
"source": [
"Get stats for a specific network interface:"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "9aacfb32-c0e3-4fc7-b1d2-d216e46088cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'bytes_sent': 118799,\n",
" 'bytes_recv': 275052,\n",
" 'speed': 0,\n",
" 'key': 'interface_name',\n",
" 'interface_name': 'wlp0s20f3',\n",
" 'alias': 'WIFI',\n",
" 'bytes_all': 393851,\n",
" 'time_since_update': 46.24822926521301,\n",
" 'bytes_recv_gauge': 1108795793,\n",
" 'bytes_recv_rate_per_sec': 5947.0,\n",
" 'bytes_sent_gauge': 21268464,\n",
" 'bytes_sent_rate_per_sec': 2568.0,\n",
" 'bytes_all_gauge': 1130064257,\n",
" 'bytes_all_rate_per_sec': 8516.0}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3')"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "4f5ae513-6022-4a52-8d6c-e8b62afacc24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5105.0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3').get('bytes_recv_rate_per_sec')"
]
},
{
"cell_type": "markdown",
"id": "8b0bdbf4-e386-44aa-9585-1d042f0ded5d",
"metadata": {},
"source": [
"## Additional information"
]
},
{
"cell_type": "markdown",
"id": "5c52a0c7-06fb-432a-bdb7-9921f432d5a6",
"metadata": {},
"source": [
"Example for the LOAD plugin."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "99303a2b-52a3-440f-a896-ad4951a9de34",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': 1.01123046875, 'min5': 0.83447265625, 'min15': 0.76171875, 'cpucore': 16}"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load"
]
},
{
"cell_type": "markdown",
"id": "7a560824-2787-4436-b39b-63de0c455536",
"metadata": {},
"source": [
"Get the limit configured in the glances.conf:"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "cbbc6a81-623f-4eff-9d08-e6a8b5981660",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 1 minute.',\n",
" 'unit': 'float'},\n",
" 'min5': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 5 minutes.',\n",
" 'unit': 'float'},\n",
" 'min15': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 15 minutes.',\n",
" 'unit': 'float'},\n",
" 'cpucore': {'description': 'Total number of CPU core.', 'unit': 'number'}}"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.fields_description"
]
},
{
"cell_type": "markdown",
"id": "2bd51d13-77e3-48f0-aa53-af86df6425f8",
"metadata": {},
"source": [
"Get field description and unit:"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "8682edcf-a8b9-424c-976f-2a301a05be6a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history_size': 1200.0,\n",
" 'load_disable': ['False'],\n",
" 'load_careful': 0.7,\n",
" 'load_warning': 1.0,\n",
" 'load_critical': 5.0}"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_limits()"
]
},
{
"cell_type": "raw",
"id": "3c671ff8-3a0c-48d3-8247-6081c69c19a9",
"metadata": {},
"source": [
"Get current stats views regarding limits:"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "45e03e9b-233c-4359-bcbc-7d2f06aca1c6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min5': {'decoration': 'OK',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min15': {'decoration': 'OK_LOG',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'cpucore': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False}}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_views()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.14.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -2,3 +2,36 @@ You are in the main Glances source folder. This page is **ONLY** for developers.
If you are looking for the user manual, please follow this link:
https://glances.readthedocs.io/en/stable/
===
__init__.py Global module init
__main__.py Entry point for Glances module
config.py Manage the configuration file
globals.py Share variables upon modules
main.py Main script to rule them all...
client.py Glances client
server.py Glances server
webserver.py Glances web server (Based on FastAPI)
autodiscover.py Glances autodiscover module (via zeroconf)
standalone.py Glances standalone (curses interface)
password.py Manage password for Glances client/server
stats.py The stats manager
timer.py The timer class
actions.py Manage trigger actions (via mustache)
snmp.py Glances SNMP client (via pysnmp)
...
plugins
=> Glances plugins
...
outputs
=> Glances UI
glances_curses.py The curses interface
glances_restful_api.py The HTTP/API & Web based interface
...
exports
=> Glances exports
...
amps
=> Glances Application Monitoring Processes (AMP)
...

View File

@ -19,8 +19,7 @@ import tracemalloc
# Global name
# Version should start and end with a numerical char
# See https://packaging.python.org/specifications/core-metadata/#version
# Examples: 1.0.0, 1.0.0rc1, 1.1.0_dev1
__version__ = "4.4.2_dev1"
__version__ = "4.2.1"
__apiversion__ = '4'
__author__ = 'Nicolas Hennion <nicolas@nicolargo.com>'
__license__ = 'LGPLv3'
@ -53,10 +52,10 @@ if psutil_version_info < psutil_min_version:
# Trac malloc is only available on Python 3.4 or higher
def __signal_handler(sig, frame):
logger.debug(f"Signal {sig} caught")
# Avoid Glances hanging when killing the process with multiple CTRL-C (see #3264)
signal.signal(signal.SIGINT, signal.SIG_IGN)
def __signal_handler(signal, frame):
logger.debug(f"Signal {signal} caught")
end()
@ -75,53 +74,6 @@ def end():
sys.exit(0)
def start_main_loop(args, start_duration):
logger.debug(f"Glances started in {start_duration.get()} seconds")
if args.stop_after:
logger.info(f'Glances will be stopped in ~{args.stop_after * args.time} seconds')
def check_memleak(args, mode):
if args.memory_leak:
wait = args.stop_after * args.time * args.memory_leak * 2
print(f'Memory leak detection, please wait ~{wait} seconds...')
# First run without dump to fill the memory
mode.serve_n(args.stop_after)
# Then start the memory-leak loop
snapshot_begin = tracemalloc.take_snapshot()
else:
snapshot_begin = None
return snapshot_begin
def setup_server_mode(args, mode):
if args.stdout_issue or args.stdout_api_restful_doc or args.stdout_api_doc:
# Serve once for issue and API documentation modes
mode.serve_issue()
else:
# Serve forever
mode.serve_forever()
def maybe_trace_memleak(args, snapshot_begin):
if args.trace_malloc or args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
if args.memory_leak:
snapshot_diff = snapshot_end.compare_to(snapshot_begin, 'filename')
memory_leak = sum([s.size_diff for s in snapshot_diff])
print(f"Memory consumption: {memory_leak / 1000:.1f}KB (see log for details)")
logger.info("Memory consumption (top 5):")
for stat in snapshot_diff[:5]:
logger.info(stat)
if args.trace_malloc:
# See more options here: https://docs.python.org/3/library/tracemalloc.html
top_stats = snapshot_end.statistics("filename")
print("[ Trace malloc - Top 10 ]")
for stat in top_stats[:10]:
print(stat)
def start(config, args):
"""Start Glances."""
@ -149,10 +101,40 @@ def start(config, args):
logger.info(f"Start {GlancesMode.__name__} mode")
mode = GlancesMode(config=config, args=args)
start_main_loop(args, start_duration)
snapshot_begin = check_memleak(args, mode)
setup_server_mode(args, mode)
maybe_trace_memleak(args, snapshot_begin)
# Start the main loop
logger.debug(f"Glances started in {start_duration.get()} seconds")
if args.stop_after:
logger.info(f'Glances will be stopped in ~{args.stop_after * args.time} seconds')
if args.memory_leak:
print(f'Memory leak detection, please wait ~{args.stop_after * args.time * args.memory_leak * 2} seconds...')
# First run without dump to fill the memory
mode.serve_n(args.stop_after)
# Then start the memory-leak loop
snapshot_begin = tracemalloc.take_snapshot()
if args.stdout_issue or args.stdout_apidoc:
# Serve once for issue/test mode
mode.serve_issue()
else:
# Serve forever
mode.serve_forever()
if args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
snapshot_diff = snapshot_end.compare_to(snapshot_begin, 'filename')
memory_leak = sum([s.size_diff for s in snapshot_diff])
print(f"Memory consumption: {memory_leak / 1000:.1f}KB (see log for details)")
logger.info("Memory consumption (top 5):")
for stat in snapshot_diff[:5]:
logger.info(stat)
elif args.trace_malloc:
# See more options here: https://docs.python.org/3/library/tracemalloc.html
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics("filename")
print("[ Trace malloc - Top 10 ]")
for stat in top_stats[:10]:
print(stat)
# Shutdown
mode.end()
@ -189,6 +171,3 @@ def main():
# Glances can be ran in standalone, client or server mode
start(config=core.get_config(), args=core.get_args())
# End of glances/__init__.py

View File

@ -66,7 +66,7 @@ class Amp(GlancesAmp):
"""Update the AMP"""
# Get the Nginx status
logger.debug('{}: Update stats using status URL {}'.format(self.NAME, self.get('status_url')))
res = requests.get(self.get('status_url'), timeout=15)
res = requests.get(self.get('status_url'))
if res.ok:
# u'Active connections: 1 \nserver accepts handled requests\n 1 1 1 \nReading: 0 Writing: 1 Waiting: 0 \n'
self.set_result(res.text.rstrip())

View File

@ -37,7 +37,7 @@ systemctl_cmd=/usr/bin/systemctl --plain
from subprocess import CalledProcessError, check_output
from glances.amps.amp import GlancesAmp
from glances.globals import to_ascii
from glances.globals import iteritems, to_ascii
from glances.logger import logger
@ -45,7 +45,7 @@ class Amp(GlancesAmp):
"""Glances' Systemd AMP."""
NAME = 'Systemd'
VERSION = '1.1'
VERSION = '1.0'
DESCRIPTION = 'Get services list from systemctl (systemd)'
AUTHOR = 'Nicolargo'
EMAIL = 'contact@nicolargo.com'
@ -77,7 +77,7 @@ class Amp(GlancesAmp):
status[column[c]] = 1
# Build the output (string) message
output = 'Services\n'
for k, v in status.items():
for k, v in iteritems(status):
output += f'{k}: {v}\n'
self.set_result(output, separator=' ')

View File

@ -34,6 +34,7 @@ service_cmd=/usr/bin/service --status-all
"""
from glances.amps.amp import GlancesAmp
from glances.globals import iteritems
from glances.logger import logger
from glances.secure import secure_popen
@ -42,7 +43,7 @@ class Amp(GlancesAmp):
"""Glances' Systemd AMP."""
NAME = 'SystemV'
VERSION = '1.1'
VERSION = '1.0'
DESCRIPTION = 'Get services list from service (initd)'
AUTHOR = 'Nicolargo'
EMAIL = 'contact@nicolargo.com'
@ -76,7 +77,7 @@ class Amp(GlancesAmp):
status['upstart'] += 1
# Build the output (string) message
output = 'Services\n'
for k, v in status.items():
for k, v in iteritems(status):
output += f'{k}: {v}\n'
self.set_result(output, separator=' ')

View File

@ -12,7 +12,7 @@ import os
import re
import threading
from glances.globals import amps_path, listkeys
from glances.globals import amps_path, iteritems, listkeys
from glances.logger import logger
from glances.processes import glances_processes
@ -90,7 +90,7 @@ class AmpsList:
processlist = glances_processes.get_list()
# Iter upon the AMPs dict
for k, v in self.get().items():
for k, v in iteritems(self.get()):
if not v.enable():
# Do not update if the enable tag is set
continue
@ -105,7 +105,7 @@ class AmpsList:
amps_list = self._build_amps_list(v, processlist)
if amps_list:
if len(amps_list) > 0:
# At least one process is matching the regex
logger.debug(f"AMPS: {len(amps_list)} processes {k} detected ({amps_list})")
# Call the AMP update method
@ -125,14 +125,18 @@ class AmpsList:
Search application monitored processes by a regular expression
"""
ret = []
try:
# Search in both cmdline and name (for kernel thread, see #1261)
ret = [
{'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}
for p in processlist
if re.search(amp_value.regex(), p['name'])
or ((cmdline := p.get('cmdline')) and re.search(amp_value.regex(), ' '.join(cmdline)))
]
for p in processlist:
if (re.search(amp_value.regex(), p['name']) is not None) or (
p['cmdline'] is not None
and p['cmdline'] != []
and re.search(amp_value.regex(), ' '.join(p['cmdline'])) is not None
):
ret.append(
{'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}
)
except (TypeError, KeyError) as e:
logger.debug(f"Can not build AMPS list ({e})")

View File

@ -1,117 +0,0 @@
#
# Glances - An eye on your system
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
from glances import __version__ as glances_version
from glances.globals import auto_unit, weak_lru_cache
from glances.main import GlancesMain
from glances.outputs.glances_bars import Bar
from glances.processes import sort_stats
from glances.stats import GlancesStats
plugin_dependencies_tree = {
'processlist': ['processcount'],
}
class GlancesAPI:
ttl = 2.0 # Default cache TTL in seconds
def __init__(self, config=None, args=None):
self.__version__ = glances_version.split('.')[0] # Get the major version
core = GlancesMain()
self.args = args if args is not None else core.get_args()
self.config = config if config is not None else core.get_config()
self._stats = GlancesStats(config=self.config, args=self.args)
# Set the cache TTL for the API
self.ttl = self.args.time if self.args.time is not None else self.ttl
# Init the stats of all plugins in order to ensure that rates are computed
self._stats.update()
@weak_lru_cache(maxsize=1, ttl=ttl)
def __getattr__(self, item):
"""Fallback to the stats object for any missing attributes."""
if item in self._stats.getPluginsList():
if item in plugin_dependencies_tree:
# Ensure dependencies are updated before accessing the plugin
for dependency in plugin_dependencies_tree[item]:
self._stats.get_plugin(dependency).update()
# Update the plugin stats
self._stats.get_plugin(item).update()
return self._stats.get_plugin(item)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
def plugins(self):
"""Return the list of available plugins."""
return self._stats.getPluginsList()
def auto_unit(self, number, low_precision=False, min_symbol='K', none_symbol='-'):
"""
Converts a numeric value into a human-readable string with appropriate units.
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
"""
return auto_unit(number, low_precision, min_symbol, none_symbol)
def bar(self, value, size=18, bar_char='', empty_char='', pre_char='', post_char=''):
"""
Generate a progress bar representation for a given value.
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
"""
b = Bar(
size, bar_char=bar_char, empty_char=empty_char, pre_char=pre_char, post_char=post_char, display_value=False
)
b.percent = value
return b.get()
def top_process(self, limit=3, sorted_by='cpu_percent', sorted_by_secondary='memory_percent'):
"""
Returns a list of the top processes sorted by specified criteria.
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
"""
# Exclude glances process from the top list
# because in fetch mode, Glances generates a CPU load
all_but_glances = [
p
for p in self._stats.get_plugin('processlist').get_raw()
if p['cmdline'] and 'glances' not in (p['cmdline'] or ())
]
return sort_stats(all_but_glances, sorted_by=sorted_by, sorted_by_secondary=sorted_by_secondary)[:limit]

View File

@ -10,14 +10,6 @@
from datetime import datetime
# Ugly hack waiting for Python 3.10 deprecation
try:
from datetime import UTC
except ImportError:
from datetime import timezone
UTC = timezone.utc
class GlancesAttribute:
def __init__(self, name, description='', history_max_size=None):
@ -81,7 +73,7 @@ class GlancesAttribute:
Value is a tuple: (<timestamp>, <new_value>)
"""
self._value = (datetime.now(UTC), new_value)
self._value = (datetime.now(), new_value)
self.history_add(self._value)
"""

View File

@ -11,7 +11,7 @@
import socket
import sys
from glances.globals import get_ip_address
from glances.globals import BSD
from glances.logger import logger
try:
@ -42,7 +42,7 @@ class AutoDiscovered:
def __init__(self):
# server_dict is a list of dict (JSON compliant)
# [ {'key': 'zeroconf name', ip': '172.1.2.3', 'port': 61209, 'protocol': 'rpc', 'cpu': 3, 'mem': 34 ...} ... ]
# [ {'key': 'zeroconf name', ip': '172.1.2.3', 'port': 61209, 'cpu': 3, 'mem': 34 ...} ... ]
self._server_list = []
def get_servers_list(self):
@ -53,14 +53,13 @@ class AutoDiscovered:
"""Set the key to the value for the server_pos (position in the list)."""
self._server_list[server_pos][key] = value
def add_server(self, name, ip, port, protocol='rpc'):
def add_server(self, name, ip, port):
"""Add a new server to the list."""
new_server = {
'key': name, # Zeroconf name with both hostname and port
'name': name.split(':')[0], # Short name
'ip': ip, # IP address seen by the client
'port': port, # TCP port
'protocol': str(protocol), # RPC or RESTFUL protocol
'username': 'glances', # Default username
'password': '', # Default password
'status': 'UNKNOWN', # Server status: 'UNKNOWN', 'OFFLINE', 'ONLINE', 'PROTECTED'
@ -111,18 +110,10 @@ class GlancesAutoDiscoverListener:
address = info.addresses[0] if info.addresses else info.parsed_addresses[0]
new_server_ip = socket.inet_ntoa(address)
new_server_port = info.port
new_server_protocol = info.properties[b'protocol'].decode() if b'protocol' in info.properties else 'rpc'
# Add server to the global dict
self.servers.add_server(
srv_name,
new_server_ip,
new_server_port,
protocol=new_server_protocol,
)
logger.info(
f"New {new_server_protocol} Glances server detected ({srv_name} from {new_server_ip}:{new_server_port})"
)
self.servers.add_server(srv_name, new_server_ip, new_server_port)
logger.info(f"New Glances server detected ({srv_name} from {new_server_ip}:{new_server_port})")
else:
logger.warning("New Glances server detected, but failed to be get Zeroconf ServiceInfo ")
return True
@ -132,14 +123,6 @@ class GlancesAutoDiscoverListener:
self.servers.remove_server(srv_name)
logger.info(f"Glances server {srv_name} removed from the autodetect list")
def update_service(self, zeroconf, srv_type, srv_name):
"""Update the server from the list.
Done by a dirty hack (remove + add).
"""
self.remove_service(zeroconf, srv_type, srv_name)
self.add_service(zeroconf, srv_type, srv_name)
logger.info(f"Glances server {srv_name} updated from the autodetect list")
class GlancesAutoDiscoverServer:
"""Implementation of the Zeroconf protocol (server side for the Glances client)."""
@ -187,8 +170,16 @@ class GlancesAutoDiscoverClient:
except OSError as e:
logger.error(f"Cannot start zeroconf: {e}")
if zeroconf_bind_address == '0.0.0.0':
zeroconf_bind_address = get_ip_address()[0]
# XXX *BSDs: Segmentation fault (core dumped)
# -- https://bitbucket.org/al45tair/netifaces/issues/15
if not BSD:
try:
# -B @ overwrite the dynamic IPv4 choice
if zeroconf_bind_address == '0.0.0.0':
zeroconf_bind_address = self.find_active_ip_address()
except KeyError:
# Issue #528 (no network interface available)
pass
# Ensure zeroconf_bind_address is an IP address not an host
zeroconf_bind_address = socket.gethostbyname(zeroconf_bind_address)
@ -205,7 +196,7 @@ class GlancesAutoDiscoverClient:
port=args.port,
weight=0,
priority=0,
properties={'protocol': 'rest' if args.webserver else 'rpc'},
properties={},
server=hostname,
)
except TypeError:
@ -218,7 +209,7 @@ class GlancesAutoDiscoverClient:
port=args.port,
weight=0,
priority=0,
properties={'protocol': 'rest' if args.webserver else 'rpc'},
properties={},
server=hostname,
)
try:
@ -230,6 +221,16 @@ class GlancesAutoDiscoverClient:
else:
logger.error("Cannot announce Glances server on the network: zeroconf library not found.")
@staticmethod
def find_active_ip_address():
"""Try to find the active IP addresses."""
import netifaces
# Interface of the default gateway
gateway_itf = netifaces.gateways()['default'][netifaces.AF_INET][1]
# IP address for the interface
return netifaces.ifaddresses(gateway_itf)[netifaces.AF_INET][0]['addr']
def close(self):
if zeroconf_tag:
self.zeroconf.unregister_service(self.info)

View File

@ -17,10 +17,6 @@ from glances import __version__
from glances.globals import json_loads
from glances.logger import logger
from glances.outputs.glances_curses import GlancesCursesClient
from glances.outputs.glances_stdout import GlancesStdout
from glances.outputs.glances_stdout_csv import GlancesStdoutCsv
from glances.outputs.glances_stdout_fetch import GlancesStdoutFetch
from glances.outputs.glances_stdout_json import GlancesStdoutJson
from glances.stats_client import GlancesStatsClient
from glances.timer import Counter
@ -77,7 +73,7 @@ class GlancesClient:
def log_and_exit(self, msg=''):
"""Log and exit."""
if not self.return_to_browser:
logger.critical(f"Error when connecting to Glances server: {msg}")
logger.critical(msg)
sys.exit(2)
else:
logger.error(msg)
@ -176,21 +172,6 @@ class GlancesClient:
if self.quiet:
# In quiet mode, nothing is displayed
logger.info("Quiet mode is ON: Nothing will be displayed")
elif self.args.stdout:
logger.info(f"Stdout mode is ON, following stats will be displayed: {self.args.stdout}")
# Init screen
self.screen = GlancesStdout(config=self.config, args=self.args)
elif self.args.stdout_json:
logger.info(f"Stdout JSON mode is ON, following stats will be displayed: {self.args.stdout_json}")
# Init screen
self.screen = GlancesStdoutJson(config=self.config, args=self.args)
elif self.args.stdout_csv:
logger.info(f"Stdout CSV mode is ON, following stats will be displayed: {self.args.stdout_csv}")
# Init screen
self.screen = GlancesStdoutCsv(config=self.config, args=self.args)
elif self.args.stdout_fetch:
logger.info("Fetch mode is ON")
self.screen = GlancesStdoutFetch(config=self.config, args=self.args)
else:
self.screen = GlancesCursesClient(config=self.config, args=self.args)
@ -256,7 +237,6 @@ class GlancesClient:
return self.client_mode
exit_key = False
try:
while True and not exit_key:
# Update the stats
@ -284,8 +264,8 @@ class GlancesClient:
else:
# In quiet mode, we only wait adapated_refresh seconds
time.sleep(adapted_refresh)
except Exception:
logger.critical("Critical error in client serve_forever loop")
except Exception as e:
logger.critical(e)
self.end()
return self.client_mode

View File

@ -1,19 +1,27 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2024 Nicolas Hennion <nicolas@nicolargo.com>
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Manage the Glances client browser (list of Glances server)."""
import webbrowser
import threading
from glances.client import GlancesClient
from defusedxml import xmlrpc
from glances.autodiscover import GlancesAutoDiscoverServer
from glances.client import GlancesClient, GlancesClientTransport
from glances.globals import json_loads
from glances.logger import LOG_FILENAME, logger
from glances.outputs.glances_curses_browser import GlancesCursesBrowser
from glances.servers_list import GlancesServersList
from glances.password_list import GlancesPasswordList as GlancesPassword
from glances.static_list import GlancesStaticServer
# Correct issue #1025 by monkey path the xmlrpc lib
xmlrpc.monkey_patch()
class GlancesClientBrowser:
@ -23,28 +31,112 @@ class GlancesClientBrowser:
# Store the arg/config
self.args = args
self.config = config
self.static_server = None
self.password = None
# Init the server list
self.servers_list = GlancesServersList(config=config, args=args)
# Load the configuration file
self.load()
# Start the autodiscover mode (Zeroconf listener)
if not self.args.disable_autodiscover:
self.autodiscover_server = GlancesAutoDiscoverServer()
else:
self.autodiscover_server = None
# Init screen
self.screen = GlancesCursesBrowser(args=self.args)
def load(self):
"""Load server and password list from the configuration file."""
# Init the static server list (if defined)
self.static_server = GlancesStaticServer(config=self.config)
# Init the password list (if defined)
self.password = GlancesPassword(config=self.config)
def get_servers_list(self):
"""Return the current server list (list of dict).
Merge of static + autodiscover servers list.
"""
ret = []
if self.args.browser:
ret = self.static_server.get_servers_list()
if self.autodiscover_server is not None:
ret = self.static_server.get_servers_list() + self.autodiscover_server.get_servers_list()
return ret
def __get_uri(self, server):
"""Return the URI for the given server dict."""
# Select the connection mode (with or without password)
if server['password'] != "":
if server['status'] == 'PROTECTED':
# Try with the preconfigure password (only if status is PROTECTED)
clear_password = self.password.get_password(server['name'])
if clear_password is not None:
server['password'] = self.password.get_hash(clear_password)
return 'http://{}:{}@{}:{}'.format(server['username'], server['password'], server['ip'], server['port'])
return 'http://{}:{}'.format(server['ip'], server['port'])
def __update_stats(self, server):
"""Update stats for the given server (picked from the server list)"""
# Get the server URI
uri = self.__get_uri(server)
# Try to connect to the server
t = GlancesClientTransport()
t.set_timeout(3)
# Get common stats from Glances server
try:
s = xmlrpc.xmlrpc_client.ServerProxy(uri, transport=t)
except Exception as e:
logger.warning(f"Client browser couldn't create socket ({e})")
return server
# Get the stats
for column in self.static_server.get_columns():
server_key = column.get('plugin') + '_' + column.get('field')
if 'key' in column:
server_key += '_' + column.get('key')
try:
# Value
v_json = json_loads(s.getPlugin(column['plugin']))
if 'key' in column:
v_json = [i for i in v_json if i[i['key']].lower() == column['key'].lower()][0]
server[server_key] = v_json[column['field']]
# Decoration
d_json = json_loads(s.getPluginView(column['plugin']))
if 'key' in column:
d_json = d_json.get(column['key'])
server[server_key + '_decoration'] = d_json[column['field']]['decoration']
except (KeyError, IndexError, xmlrpc.xmlrpc_client.Fault) as e:
logger.debug(f"Error while grabbing stats form server ({e})")
except OSError as e:
logger.debug(f"Error while grabbing stats form server ({e})")
server['status'] = 'OFFLINE'
except xmlrpc.xmlrpc_client.ProtocolError as e:
if e.errcode == 401:
# Error 401 (Authentication failed)
# Password is not the right one...
server['password'] = None
server['status'] = 'PROTECTED'
else:
server['status'] = 'OFFLINE'
logger.debug(f"Cannot grab stats from server ({e.errcode} {e.errmsg})")
else:
# Status
server['status'] = 'ONLINE'
return server
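A hedged example of how a column spec from get_columns() is flattened into a key of the server dict (the values below are hypothetical):
column = {'plugin': 'mem', 'field': 'percent'}
# server_key -> 'mem_percent'
column = {'plugin': 'fs', 'field': 'used', 'key': '/'}
# server_key -> 'fs_used_/'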
def __display_server(self, server):
"""Connect and display the given server"""
# Display the Glances client for the selected server
logger.debug(f"Selected server {server}")
if server['protocol'].lower() == 'rest':
# Display a popup
self.screen.display_popup(
'Open the WebUI {}:{} in a Web Browser'.format(server['name'], server['port']), duration=1
)
# Try to open a Webbrowser
webbrowser.open(self.servers_list.get_uri(server), new=2, autoraise=1)
self.screen.active_server = None
return
# Connection can take time
# Display a popup
self.screen.display_popup('Connect to {}:{}'.format(server['name'], server['port']), duration=1)
@ -52,11 +144,8 @@ class GlancesClientBrowser:
# A password is needed to access the server's stats
if server['password'] is None:
# First of all, check if a password is available in the [passwords] section
clear_password = self.servers_list.password.get_password(server['name'])
if (
clear_password is None
or self.servers_list.get_servers_list()[self.screen.active_server]['status'] == 'PROTECTED'
):
clear_password = self.password.get_password(server['name'])
if clear_password is None or self.get_servers_list()[self.screen.active_server]['status'] == 'PROTECTED':
# Else, the password should be entered by the user
# Display a popup to enter password
clear_password = self.screen.display_popup(
@ -64,7 +153,7 @@ class GlancesClientBrowser:
)
# Store the password for the selected server
if clear_password is not None:
self.set_in_selected('password', self.servers_list.password.get_hash(clear_password))
self.set_in_selected('password', self.password.get_hash(clear_password))
# Display the Glances client on the selected server
logger.info("Connect Glances client to the {} server".format(server['key']))
@ -82,7 +171,7 @@ class GlancesClientBrowser:
# Test if client and server are in the same major version
if not client.login():
self.screen.display_popup(
"Sorry, cannot connect to '{}'\nSee '{}' for more details".format(server['name'], LOG_FILENAME)
"Sorry, cannot connect to '{}'\n" "See '{}' for more details".format(server['name'], LOG_FILENAME)
)
# Set the ONLINE status for the selected server
@ -110,16 +199,31 @@ class GlancesClientBrowser:
def __serve_forever(self):
"""Main client loop."""
# No need to update the server list
# It's done by the GlancesAutoDiscoverListener class (autodiscover.py)
# Or defined statically in the configuration file (module static_list.py)
# For each server in the list, grab elementary stats (CPU, LOAD, MEM, OS...)
thread_list = {}
while not self.screen.is_end:
# Update the stats in the servers list
self.servers_list.update_servers_stats()
logger.debug(f"Iter through the following server list: {self.get_servers_list()}")
for v in self.get_servers_list():
key = v["key"]
thread = thread_list.get(key, None)
if thread is None or thread.is_alive() is False:
thread = threading.Thread(target=self.__update_stats, args=[v])
thread_list[key] = thread
thread.start()
# Update the screen (list or Glances client)
if self.screen.active_server is None:
# Display Glances browser (servers list)
self.screen.update(self.servers_list.get_servers_list())
# Display the Glances browser
self.screen.update(self.get_servers_list())
else:
# Display selected Glances server
self.__display_server(self.servers_list.get_servers_list()[self.screen.active_server])
# Display the active server
self.__display_server(self.get_servers_list()[self.screen.active_server])
# exit key pressed
for thread in thread_list.values():
thread.join()
def serve_forever(self):
"""Wrapper to the serve_forever function.
@ -134,7 +238,13 @@ class GlancesClientBrowser:
def set_in_selected(self, key, value):
"""Set the (key, value) for the selected server in the list."""
self.servers_list.set_in_selected(self.screen.active_server, key, value)
# Static list then dynamic one
if self.screen.active_server >= len(self.static_server.get_servers_list()):
self.autodiscover_server.set_server(
self.screen.active_server - len(self.static_server.get_servers_list()), key, value
)
else:
self.static_server.set_server(self.screen.active_server, key, value)
def end(self):
"""End of the client browser session."""

@ -81,20 +81,18 @@ def default_config_dir():
- Linux, SunOS, *BSD, macOS: /usr/share/doc (as defined in the setup.py files)
- Windows: %APPDATA%\glances
"""
paths = []
# Add system path
if LINUX or SUNOS or BSD or MACOS:
paths.append(os.path.join(sys.prefix, 'share', 'doc'))
else:
paths.append(os.environ.get('APPDATA'))
# If we are in venv (issue #2803), sys.prefix != sys.base_prefix and we
# already added venv path with sys.prefix. Add base_prefix path too
path = []
# Add venv path (solve issue #2803)
if in_virtualenv():
paths.append(os.path.join(sys.base_prefix, 'share', 'doc'))
path.append(os.path.join(sys.prefix, 'share', 'doc', 'glances'))
return [os.path.join(path, 'glances') if path is not None else '' for path in paths]
# Add other system paths
if LINUX or SUNOS or BSD or MACOS:
path.append('/usr/share/doc')
else:
path.append(os.environ.get('APPDATA'))
return path
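A sketch of what the rewritten lookup returns on a Linux host, assuming a virtualenv is active at /home/user/.venv (paths are illustrative):
default_config_dir()
# -> ['/home/user/.venv/share/doc/glances', '/usr/share/doc']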
def in_virtualenv():
@ -263,8 +261,9 @@ class Config:
# Sensors
if not self.parser.has_section('sensors'):
self.parser.add_section('sensors')
self.set_default_cwc('sensors', 'temperature_core', cwc=['60', '70', '80'])
self.set_default_cwc('sensors', 'temperature_hdd', cwc=['45', '52', '60'])
self.set_default_cwc('sensors', 'battery', cwc=['70', '80', '90'])
self.set_default_cwc('sensors', 'battery', cwc=['80', '90', '95'])
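Assuming set_default_cwc follows the usual Glances careful/warning/critical naming, a hedged reading of the new battery line (the option names are inferred, not shown in this diff):
# cwc=['80', '90', '95'] is shorthand for these [sensors] defaults:
# battery_careful=80
# battery_warning=90
# battery_critical=95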
# Process list
if not self.parser.has_section('processlist'):

@ -8,8 +8,7 @@
"""CPU percent stats shared between CPU and Quicklook plugins."""
import platform
from typing import TypedDict
from typing import List, Optional, TypedDict
import psutil
@ -18,184 +17,11 @@ from glances.timer import Timer
__all__ = ["cpu_percent"]
CPU_IMPLEMENTERS = {
0x41: 'ARM Limited',
0x42: 'Broadcom',
0x43: 'Cavium',
0x44: 'DEC',
0x46: 'Fujitsu',
0x48: 'HiSilicon',
0x49: 'Infineon Technologies',
0x4D: 'Motorola/Freescale',
0x4E: 'NVIDIA',
0x50: 'Applied Micro (APM)',
0x51: 'Qualcomm',
0x53: 'Samsung',
0x56: 'Marvell',
0x61: 'Apple',
0x66: 'Faraday',
0x69: 'Intel',
0x6D: 'Microsoft',
0x70: 'Phytium',
0xC0: 'Ampere Computing',
}
CPU_PARTS = {
# ARM Limited (0x41)
0x41: {
0xD03: 'Cortex-A53',
0xD04: 'Cortex-A35',
0xD05: 'Cortex-A55',
0xD06: 'Cortex-A65',
0xD07: 'Cortex-A57',
0xD08: 'Cortex-A72',
0xD09: 'Cortex-A73',
0xD0A: 'Cortex-A75',
0xD0B: 'Cortex-A76',
0xD0C: 'Neoverse N1',
0xD0D: 'Cortex-A77',
0xD0E: 'Cortex-A76AE',
0xD13: 'Cortex-R52',
0xD20: 'Cortex-M23',
0xD21: 'Cortex-M33',
0xD40: 'Neoverse V1',
0xD41: 'Cortex-A78',
0xD42: 'Cortex-A78AE',
0xD43: 'Cortex-A65AE',
0xD44: 'Cortex-X1',
0xD46: 'Cortex-A510',
0xD47: 'Cortex-A710',
0xD48: 'Cortex-X2',
0xD49: 'Neoverse N2',
0xD4A: 'Neoverse E1',
0xD4B: 'Cortex-A78C',
0xD4C: 'Cortex-X1C',
0xD4D: 'Cortex-A715',
0xD4E: 'Cortex-X3',
0xD4F: 'Neoverse V2',
0xD80: 'Cortex-A520',
0xD81: 'Cortex-A720',
0xD82: 'Cortex-X4',
0xD84: 'Neoverse V3',
0xD85: 'Cortex-X925',
0xD87: 'Cortex-A725',
},
# Apple (0x61)
0x61: {
0x000: 'Swift',
0x001: 'Cyclone',
0x002: 'Typhoon',
0x003: 'Twister',
0x004: 'Hurricane',
0x005: 'Monsoon/Mistral',
0x006: 'Vortex/Tempest',
0x007: 'Lightning/Thunder',
0x008: 'Firestorm/Icestorm (M1)',
0x009: 'Avalanche/Blizzard (M2)',
0x00E: 'Everest/Sawtooth (M3)',
0x010: 'Blizzard/Avalanche (A16)',
0x011: 'Coll (M4)',
},
# Qualcomm (0x51)
0x51: {
0x00F: 'Scorpion',
0x02D: 'Scorpion',
0x04D: 'Krait',
0x06F: 'Krait',
0x201: 'Kryo',
0x205: 'Kryo',
0x211: 'Kryo',
0x800: 'Kryo 260/280 Gold (Cortex-A73)',
0x801: 'Kryo 260/280 Silver (Cortex-A53)',
0x802: 'Kryo 385 Gold (Cortex-A75)',
0x803: 'Kryo 385 Silver (Cortex-A55)',
0x804: 'Kryo 485 Gold (Cortex-A76)',
0x805: 'Kryo 485 Silver (Cortex-A55)',
0xC00: 'Falkor',
0xC01: 'Saphira',
},
# Samsung (0x53)
0x53: {
0x001: 'Exynos M1/M2',
0x002: 'Exynos M3',
0x003: 'Exynos M4',
0x004: 'Exynos M5',
},
# NVIDIA (0x4e)
0x4E: {
0x000: 'Denver',
0x003: 'Denver 2',
0x004: 'Carmel',
},
# Marvell (0x56)
0x56: {
0x131: 'Feroceon 88FR131',
0x581: 'PJ4/PJ4b',
0x584: 'PJ4B-MP',
},
# Cavium (0x43)
0x43: {
0x0A0: 'ThunderX',
0x0A1: 'ThunderX 88XX',
0x0A2: 'ThunderX 81XX',
0x0A3: 'ThunderX 83XX',
0x0AF: 'ThunderX2 99xx',
0x0B0: 'OcteonTX2',
0x0B1: 'OcteonTX2 T98',
0x0B2: 'OcteonTX2 T96',
0x0B3: 'OcteonTX2 F95',
0x0B4: 'OcteonTX2 F95N',
0x0B5: 'OcteonTX2 F95MM',
},
# Broadcom (0x42)
0x42: {
0x00F: 'Brahma B15',
0x100: 'Brahma B53',
0x516: 'Vulcan',
},
# HiSilicon (0x48)
0x48: {
0xD01: 'Kunpeng-920',
0xD40: 'Cortex-A76 (Kirin)',
},
# Ampere (0xc0)
0xC0: {
0xAC3: 'Ampere-1',
0xAC4: 'Ampere-1a',
},
# Fujitsu (0x46)
0x46: {
0x001: 'A64FX',
},
# Intel (0x69) - ARM-based chips
0x69: {
0x200: 'i80200',
0x210: 'PXA250A',
0x212: 'PXA210A',
0x242: 'i80321-400',
0x243: 'i80321-600',
0x290: 'PXA250B/PXA26x',
0x292: 'PXA210B',
0x2C2: 'i80321-400-B0',
0x2C3: 'i80321-600-B0',
0x2D0: 'PXA250C/PXA255/PXA26x',
0x2D2: 'PXA210C',
0x411: 'PXA27x',
0x41C: 'IPX425-533',
0x41D: 'IPX425-400',
0x41F: 'IPX425-266',
0x682: 'PXA32x',
0x683: 'PXA930/PXA935',
0x688: 'PXA30x',
0x689: 'PXA31x',
},
}
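A minimal sketch of how the two lookup tables combine for an ARM core, using the raw implementer and part ids read from /proc/cpuinfo (the values below are a Raspberry Pi 4 example):
implementer_id, part_id = 0x41, 0xD08  # from 'CPU implementer' / 'CPU part'
vendor = CPU_IMPLEMENTERS.get(implementer_id, 'Unknown')
core = CPU_PARTS.get(implementer_id, {}).get(part_id, 'Unknown')
print(f'{vendor} {core}')  # -> 'ARM Limited Cortex-A72'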
class CpuInfo(TypedDict):
cpu_name: str
cpu_hz: float | None
cpu_hz_current: float | None
cpu_hz: Optional[float]
cpu_hz_current: Optional[float]
class PerCpuPercentInfo(TypedDict):
@ -205,15 +31,15 @@ class PerCpuPercentInfo(TypedDict):
user: float
system: float
idle: float
nice: float | None
iowait: float | None
irq: float | None
softirq: float | None
steal: float | None
guest: float | None
guest_nice: float | None
dpc: float | None
interrupt: float | None
nice: Optional[float]
iowait: Optional[float]
irq: Optional[float]
softirq: Optional[float]
steal: Optional[float]
guest: Optional[float]
guest_nice: Optional[float]
dpc: Optional[float]
interrupt: Optional[float]
class CpuPercent:
@ -257,7 +83,7 @@ class CpuPercent:
self.cpu_info['cpu_hz_current'] = cpu_freq.current
else:
self.cpu_info['cpu_hz_current'] = None
if hasattr(cpu_freq, 'max') and cpu_freq.max != 0.0:
if hasattr(cpu_freq, 'max'):
self.cpu_info['cpu_hz'] = cpu_freq.max
else:
self.cpu_info['cpu_hz'] = None
@ -269,27 +95,17 @@ class CpuPercent:
def __get_cpu_name() -> str:
# Get the CPU name once from the /proc/cpuinfo file
# Read the first line with the "model name" ("Model" for Raspberry Pi)
ret = f'CPU {platform.processor()}'
try:
cpuinfo_lines = open('/proc/cpuinfo').readlines()
except (FileNotFoundError, PermissionError):
logger.debug("No permission to read '/proc/cpuinfo'")
return ret
return 'CPU'
cpu_implementer = None
for line in cpuinfo_lines:
# Look for the CPU name
if line.startswith('model name') or line.startswith('Model') or line.startswith('cpu model'):
return line.split(':')[1].strip()
# Look for the CPU name on ARM architecture (see #3127)
if line.startswith('CPU implementer'):
cpu_implementer = CPU_IMPLEMENTERS.get(int(line.split(':')[1].strip(), 16), ret)
ret = cpu_implementer
if line.startswith('CPU part') and cpu_implementer in CPU_PARTS:
cpu_part = CPU_PARTS[cpu_implementer].get(int(line.split(':')[1].strip(), 16), 'Unknown')
ret = f'{cpu_implementer} {cpu_part}'
return ret
return 'CPU'
def get_cpu(self) -> float:
"""Update and/or return the CPU using the psutil library."""
@ -305,7 +121,7 @@ class CpuPercent:
def _compute_cpu() -> float:
return psutil.cpu_percent(interval=0.0)
def get_percpu(self) -> list[PerCpuPercentInfo]:
def get_percpu(self) -> List[PerCpuPercentInfo]:
"""Update and/or return the per CPU list using the psutil library."""
# Never update more than 1 time per cached_timer_cpu
if self.timer_percpu.finished():
@ -315,7 +131,7 @@ class CpuPercent:
self.percpu_percent = self._compute_percpu()
return self.percpu_percent
def _compute_percpu(self) -> list[PerCpuPercentInfo]:
def _compute_percpu(self) -> List[PerCpuPercentInfo]:
psutil_percpu = enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True))
return [
{

@ -271,7 +271,6 @@ class GlancesEventsList:
event_time, event_index, event_state, event_type, event_value, proc_list, proc_desc, global_message
)
# logger.info(self.events_list)
return self.len()
def _create_event(self, event_time, event_state, event_type, event_value, proc_desc, global_message):

@ -1,19 +1,18 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2026 Nicolas Hennion <nicolas@nicolargo.com>
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""
I am your father...
...for all Glances exports IF.
"""
import re
from glances.globals import NoOptionError, NoSectionError, json_dumps
from glances.globals import NoOptionError, NoSectionError, iteritems, iterkeys, json_dumps
from glances.logger import logger
from glances.timer import Counter
@ -21,14 +20,18 @@ from glances.timer import Counter
class GlancesExport:
"""Main class for Glances export IF."""
# List of non exportable internal plugins
# List of non exportable plugins
# @TODO: remove this part and make all plugins exportable (see issue #1556)
# @TODO: also make this list configurable by the user (see issue #1443)
non_exportable_plugins = [
"alert",
"help",
"plugin",
"psutilversion",
"quicklook",
"version",
'alert',
'amps',
'help',
'now',
'plugin',
'psutilversion',
'quicklook',
'version',
]
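A hedged sketch of how such a deny-list is typically applied when selecting plugins for export; stats.getPluginsList() is assumed to return the names of all loaded plugins:
plugins = [p for p in stats.getPluginsList()
           if p not in GlancesExport.non_exportable_plugins]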
def __init__(self, config=None, args=None):
@ -52,13 +55,6 @@ class GlancesExport:
# Save last export list
self._last_exported_list = None
# Fields description
self._fields_description = None
# Load the default common export configuration
if self.config is not None:
self.load_common_conf()
def _log_result_decorator(fct):
"""Log (DEBUG) the result of the function fct."""
@ -77,25 +73,7 @@ class GlancesExport:
"""Close the export module."""
logger.debug(f"Finalise export interface {self.export_name}")
def load_common_conf(self):
"""Load the common export configuration in the Glances configuration file.
:returns: Boolean -- True if section is found
"""
# Read the common [export] section
section = "export"
opt = "exclude_fields"
try:
setattr(self, opt, self.config.get_list_value(section, opt))
except NoOptionError:
logger.debug(f"{opt} option not found in the {section} configuration section")
logger.debug(f"Load common {section} from the Glances configuration file")
return True
def load_conf(self, section, mandatories=["host", "port"], options=None):
def load_conf(self, section, mandatories=['host', 'port'], options=None):
"""Load the export <section> configuration in the Glances configuration file.
:param section: name of the export section to load
@ -125,10 +103,10 @@ class GlancesExport:
try:
setattr(self, opt, self.config.get_value(section, opt))
except NoOptionError:
logger.debug(f"{opt} option not found in the {section} configuration section")
pass
logger.debug(f"Load {section} from the Glances configuration file")
logger.debug(f"{section} parameters: { ({opt: getattr(self, opt) for opt in mandatories + options}) }")
logger.debug(f"{section} parameters: {({opt: getattr(self, opt) for opt in mandatories + options})}")
return True
@ -136,7 +114,7 @@ class GlancesExport:
"""Return the value of the item 'key'."""
ret = None
try:
ret = item[item["key"]]
ret = item[item['key']]
except KeyError:
logger.error(f"No 'key' available in {item}")
if isinstance(ret, list):
@ -152,81 +130,14 @@ class GlancesExport:
d_tags = {}
if tags:
try:
d_tags = dict(x.split(":", 1) for x in tags.split(","))
d_tags = dict([x.split(':') for x in tags.split(',')])
except ValueError:
# one of the 'key:value' pairs was missing
logger.info("Invalid tags passed: %s", tags)
logger.info('Invalid tags passed: %s', tags)
d_tags = {}
return d_tags
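A hedged trace of the two behaviours visible in this hunk: the side using split(':', 1) keeps values that themselves contain colons, while the plain split(':') side rejects them.
parse_tags('env:prod,dc:paris')
# -> {'env': 'prod', 'dc': 'paris'}        (both variants agree)
parse_tags('url:http://example.com')
# -> {'url': 'http://example.com'}         with split(':', 1)
# -> {} and an 'Invalid tags passed' log   with plain split(':')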
def normalize_for_influxdb(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
FIELD_TO_TAG = ["name", "cmdline", "type"]
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exists. If a key exists, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split(".")[0] for k in columns if k.endswith(".key")]
if not keys_list:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f"{measurement}.", ""): data_dict[k] for k in data_dict if k.startswith(f"{measurement}.")
}
else:
fields = data_dict
# Transform to InfluxDB data model
# https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
# Add the hostname as a tag
tags["hostname"] = self.hostname
if "hostname" in fields:
fields.pop("hostname")
# Others tags...
if "key" in fields and fields["key"] in fields:
# Create a tag from the key
# Tag should be a string (see InfluxDB data model)
tags[fields["key"]] = str(fields[fields["key"]])
# Remove it from the field list (can not be a field and a tag)
fields.pop(fields["key"])
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (can not be a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({"measurement": name, "tags": tags, "fields": fields})
return ret
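A minimal trace of the flat path through this normalisation (no '.key' columns), assuming the hostname is 'myhost' and no tags are configured:
columns = ['total', 'user', 'system']
points = [27.5, 15.1, 8.3]
# normalize_for_influxdb('cpu', columns, points) ->
# [{'measurement': 'cpu',
#   'tags': {'hostname': 'myhost'},
#   'fields': {'total': 27.5, 'user': 15.1, 'system': 8.3}}]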
def is_excluded(self, field):
"""Return true if the field is excluded."""
return any(re.fullmatch(i, field, re.I) for i in (getattr(self, 'exclude_fields') or ()))
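A sketch of the matching rule: every exclude_fields entry is treated as a case-insensitive regular expression that must match the whole flattened field name (the patterns below are hypothetical):
# assuming exclude_fields = ['mem_.*', 'eth0.rx']:
# is_excluded('MEM_USED') -> True   (full, case-insensitive match of 'mem_.*')
# is_excluded('eth0.tx')  -> False  (no pattern matches the whole name)
# is_excluded('mem')      -> False  ('mem_.*' requires the underscore)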
def plugins_to_export(self, stats):
"""Return the list of plugins to export.
@ -239,21 +150,12 @@ class GlancesExport:
"""Return the list of plugins last exported."""
return self._last_exported_list
def init_fields(self, stats):
"""Return fields description in order to init stats in a server."""
if not self.export_enable:
return False
self._last_exported_list = self.plugins_to_export(stats)
self._fields_description = stats.getAllFieldsDescriptionAsDict(plugin_list=self.last_exported_list())
return self._fields_description
def update(self, stats):
"""Update stats to a server.
The method builds two lists: names and values and calls the export method to export the stats.
Note: if needed this class can be overwritten.
Note: this class can be overwritten (for example in CSV and Graph).
"""
if not self.export_enable:
return False
@ -267,14 +169,10 @@ class GlancesExport:
for plugin in self.last_exported_list():
if isinstance(all_stats[plugin], dict):
all_stats[plugin].update(all_limits[plugin])
# Remove the <plugin>_disable field
all_stats[plugin].pop(f"{plugin}_disable", None)
elif isinstance(all_stats[plugin], list):
# TypeError: string indices must be integers (Network plugin) #1054
for i in all_stats[plugin]:
i.update(all_limits[plugin])
# Remove the <plugin>_disable field
i.pop(f"{plugin}_disable", None)
else:
continue
export_names, export_values = self.build_export(all_stats[plugin])
@ -283,28 +181,24 @@ class GlancesExport:
return True
def build_export(self, stats):
"""Build the export lists.
This method builds two lists: names and values.
"""
# Initialize export lists
"""Build the export lists."""
export_names = []
export_values = []
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if "key" in stats and stats["key"] in stats:
pre_key = "{}.".format(stats[stats["key"]])
if 'key' in iterkeys(stats) and stats['key'] in iterkeys(stats):
pre_key = '{}.'.format(stats[stats['key']])
else:
pre_key = ""
pre_key = ''
# Walk through the dict
for key, value in sorted(stats.items()):
for key, value in sorted(iteritems(stats)):
if isinstance(value, bool):
value = json_dumps(value).decode()
value = json_dumps(value)
if isinstance(value, list):
value = " ".join([str(v) for v in value])
value = ' '.join([str(v) for v in value])
if isinstance(value, dict):
item_names, item_values = self.build_export(value)
@ -312,9 +206,6 @@ class GlancesExport:
export_names += item_names
export_values += item_values
else:
# We are on a simple value
if self.is_excluded(pre_key + key.lower()):
continue
export_names.append(pre_key + key.lower())
export_values.append(value)
elif isinstance(stats, list):

@ -1,165 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2026 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""
I am your son...
...abstract class for AsyncIO-based Glances exports.
"""
import asyncio
import threading
import time
from abc import abstractmethod
from glances.exports.export import GlancesExport
from glances.logger import logger
class GlancesExportAsyncio(GlancesExport):
"""Abstract class for AsyncIO-based export modules.
This class manages a persistent event loop in a background thread,
allowing child classes to use AsyncIO operations for exporting data.
Child classes must implement:
- async _async_init(): AsyncIO initialization (e.g., connection setup)
- async _async_exit(): AsyncIO cleanup (e.g., disconnection)
- async _async_export(name, columns, points): AsyncIO export operation
"""
def __init__(self, config=None, args=None):
"""Init the AsyncIO export interface."""
super().__init__(config=config, args=args)
# AsyncIO event loop management
self.loop = None
self._loop_ready = threading.Event()
self._loop_exception = None
self._shutdown = False
# Start the background event loop thread
self._loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
self._loop_thread.start()
# Wait for the loop to be ready
if not self._loop_ready.wait(timeout=10):
raise RuntimeError("AsyncIO event loop failed to start within timeout")
if self._loop_exception:
raise RuntimeError(f"AsyncIO event loop creation failed: {self._loop_exception}")
if self.loop is None:
raise RuntimeError("AsyncIO event loop is None after initialization")
# Call child class AsyncIO initialization
future = asyncio.run_coroutine_threadsafe(self._async_init(), self.loop)
try:
future.result(timeout=10)
logger.debug(f"{self.export_name} AsyncIO export initialized successfully")
except Exception as e:
logger.warning(f"{self.export_name} AsyncIO initialization failed: {e}. Will retry in background.")
def _run_event_loop(self):
"""Run event loop in background thread."""
try:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self._loop_ready.set()
self.loop.run_forever()
except Exception as e:
self._loop_exception = e
self._loop_ready.set()
logger.error(f"{self.export_name} AsyncIO event loop thread error: {e}")
finally:
# Clean up pending tasks
pending = asyncio.all_tasks(self.loop)
for task in pending:
task.cancel()
if pending:
self.loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
self.loop.close()
@abstractmethod
async def _async_init(self):
"""AsyncIO initialization method.
Child classes should implement this method to perform AsyncIO-based
initialization such as connecting to servers, setting up clients, etc.
This method is called once during __init__ after the event loop is ready.
"""
pass
@abstractmethod
async def _async_exit(self):
"""AsyncIO cleanup method.
Child classes should implement this method to perform AsyncIO-based
cleanup such as disconnecting from servers, closing clients, etc.
This method is called during exit() before stopping the event loop.
"""
pass
@abstractmethod
async def _async_export(self, name, columns, points):
"""AsyncIO export method.
Child classes must implement this method to perform the actual
export operation using AsyncIO.
:param name: plugin name
:param columns: list of column names
:param points: list of values corresponding to columns
"""
pass
def exit(self):
"""Close the AsyncIO export module."""
super().exit()
self._shutdown = True
logger.info(f"{self.export_name} AsyncIO export shutting down")
# Call child class cleanup
if self.loop:
future = asyncio.run_coroutine_threadsafe(self._async_exit(), self.loop)
try:
future.result(timeout=5)
except Exception as e:
logger.error(f"{self.export_name} Error in AsyncIO cleanup: {e}")
# Stop the event loop
if self.loop:
self.loop.call_soon_threadsafe(self.loop.stop)
time.sleep(0.5)
logger.debug(f"{self.export_name} AsyncIO export shutdown complete")
def export(self, name, columns, points):
"""Export data using AsyncIO.
This method bridges the synchronous export() interface with
the AsyncIO _async_export() implementation.
"""
if self._shutdown:
logger.debug(f"{self.export_name} Export called during shutdown, skipping")
return
if not self.loop or not self.loop.is_running():
logger.error(f"{self.export_name} AsyncIO event loop is not running")
return
# Submit the export operation to the background event loop
try:
future = asyncio.run_coroutine_threadsafe(self._async_export(name, columns, points), self.loop)
# Don't block forever - use a short timeout
future.result(timeout=1)
except asyncio.TimeoutError:
logger.warning(f"{self.export_name} AsyncIO export timeout for {name}")
except Exception as e:
logger.error(f"{self.export_name} AsyncIO export error for {name}: {e}", exc_info=True)

@ -105,7 +105,7 @@ class Export(GlancesExport):
logger.debug(f"Export {name} stats to Cassandra")
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
data = {k: float(v) for (k, v) in dict(zip(columns, points)).iteritems() if isinstance(v, Number)}
# Write input to the Cassandra table
try:

@ -12,7 +12,7 @@
# How to test ?
#
# 1) docker run -d -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=admin -p 5984:5984 --name my-couchdb couchdb
# 2) .venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 2) ./venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 3) Result can be seen at: http://127.0.0.1:5984/_utils
#

@ -80,8 +80,6 @@ class Export(GlancesExport):
# Loop over plugins to export
for plugin in self.plugins_to_export(stats):
export_names, export_values = self.build_export(all_stats[plugin])
# Add the plugin name in the field
export_names = [plugin + '.' + n for n in export_names]
if self.first_line:
csv_header += export_names
csv_data += export_values
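A hedged example of the effect of the plugin-name prefixing shown above on the CSV header (field names are illustrative):
export_names = ['total', 'user', 'system']
# [plugin + '.' + n for n in export_names] with plugin = 'cpu'
# -> ['cpu.total', 'cpu.user', 'cpu.system']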

@ -1,195 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""DuckDB interface class."""
import sys
import time
from datetime import datetime
from platform import node
import duckdb
from glances.exports.export import GlancesExport
from glances.logger import logger
# Define the type conversions for DuckDB
# https://duckdb.org/docs/stable/clients/python/conversion
convert_types = {
'bool': 'BOOLEAN',
'int': 'BIGINT',
'float': 'DOUBLE',
'str': 'VARCHAR',
'tuple': 'VARCHAR', # Store tuples as VARCHAR (comma-separated)
'list': 'VARCHAR', # Store lists as VARCHAR (comma-separated)
'NoneType': 'VARCHAR',
}
class Export(GlancesExport):
"""This class manages the DuckDB export module."""
def __init__(self, config=None, args=None):
"""Init the DuckDB export IF."""
super().__init__(config=config, args=args)
# Mandatory configuration keys (additional to host and port)
self.db = None
# Optional configuration keys
self.user = None
self.password = None
self.hostname = None
# Load the configuration file
self.export_enable = self.load_conf(
'duckdb', mandatories=['database'], options=['user', 'password', 'hostname']
)
if not self.export_enable:
exit('Missing DuckDB config')
# The hostname is always added as an identifier in the DuckDB table
# so we can filter the stats by hostname
self.hostname = self.hostname or node().split(".")[0]
# Init the DuckDB client
self.client = self.init()
def init(self):
"""Init the connection to the DuckDB server."""
if not self.export_enable:
return None
try:
db = duckdb.connect(database=self.database)
except Exception as e:
logger.critical(f"Cannot connect to DuckDB {self.database} ({e})")
sys.exit(2)
else:
logger.info(f"Stats will be exported to DuckDB: {self.database}")
return db
def normalize(self, value):
# Nothing to do...
if isinstance(value, list) and len(value) == 1 and value[0] in ['True', 'False']:
return bool(value[0])
return value
def update(self, stats):
"""Update the DuckDB export module."""
if not self.export_enable:
return False
# Get all the stats & limits
# Current limitation with sensors and fs plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())
# Loop over plugins to export
for plugin in self.last_exported_list():
# Remove some fields
if isinstance(all_stats[plugin], dict):
all_stats[plugin].update(all_limits[plugin])
# Remove the <plugin>_disable field
all_stats[plugin].pop(f"{plugin}_disable", None)
elif isinstance(all_stats[plugin], list):
for i in all_stats[plugin]:
i.update(all_limits[plugin])
# Remove the <plugin>_disable field
i.pop(f"{plugin}_disable", None)
else:
continue
plugin_stats = all_stats[plugin]
creation_list = [] # List used to create the DuckDB table
values_list = [] # List of values to insert (list of lists, one list per row)
if isinstance(plugin_stats, dict):
# Create the list to create the table
creation_list.append('time TIMETZ')
creation_list.append('hostname_id VARCHAR')
for key, value in plugin_stats.items():
creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
# Create the list of values to insert
item_list = []
item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
item_list.append(self.normalize(f"{self.hostname}"))
item_list.extend([self.normalize(value) for value in plugin_stats.values()])
values_list = [item_list]
elif isinstance(plugin_stats, list) and len(plugin_stats) > 0 and 'key' in plugin_stats[0]:
# Create the list to create the table
creation_list.append('time TIMETZ')
creation_list.append('hostname_id VARCHAR')
creation_list.append('key_id VARCHAR')
for key, value in plugin_stats[0].items():
creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
# Create the list of values to insert
for plugin_item in plugin_stats:
item_list = []
item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
item_list.append(self.normalize(f"{self.hostname}"))
item_list.append(self.normalize(f"{plugin_item.get('key')}"))
item_list.extend([self.normalize(value) for value in plugin_item.values()])
values_list.append(item_list)
else:
continue
# Export stats to DuckDB
self.export(plugin, creation_list, values_list)
return True
def export(self, plugin, creation_list, values_list):
"""Export the stats to the DuckDB server."""
logger.debug(f"Export {plugin} stats to DuckDB")
# Create the table if it does not exist
table_list = [t[0] for t in self.client.sql("SHOW TABLES").fetchall()]
if plugin not in table_list:
# Execute the create table query
create_query = f"""
CREATE TABLE {plugin} (
{', '.join(creation_list)}
);"""
logger.debug(f"Create table: {create_query}")
try:
self.client.execute(create_query)
except Exception as e:
logger.error(f"Cannot create table {plugin}: {e}")
return
# Commit the changes
self.client.commit()
# Insert values into the table
for values in values_list:
insert_query = f"""
INSERT INTO {plugin} VALUES (
{', '.join(['?' for _ in values])}
);"""
logger.debug(f"Insert values into table {plugin}: {values}")
try:
self.client.execute(insert_query, values)
except Exception as e:
logger.error(f"Cannot insert data into table {plugin}: {e}")
# Commit the changes
self.client.commit()
def exit(self):
"""Close the DuckDB export module."""
# Force last write
self.client.commit()
# Close the DuckDB client
time.sleep(3) # Wait a bit to ensure all data is written
self.client.close()
# Call the father method
super().exit()
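Once exported, each plugin's table can be queried straight back with the duckdb client; the database file name and the 'total' column below are hypothetical:
import duckdb

con = duckdb.connect('glances.duckdb')  # hypothetical database file
con.sql("SELECT time, hostname_id, total FROM cpu "
        "ORDER BY time DESC LIMIT 5").show()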

@ -17,7 +17,7 @@ import pygal.style
from pygal import DateTimeLine
from glances.exports.export import GlancesExport
from glances.globals import time_series_subsample
from glances.globals import iteritems, time_series_subsample
from glances.logger import logger
from glances.timer import Timer
@ -33,7 +33,7 @@ class Export(GlancesExport):
self.export_enable = self.load_conf('graph', options=['path', 'generate_every', 'width', 'height', 'style'])
# Manage options (command line arguments overwrite configuration file)
self.path = self.path or args.export_graph_path
self.path = args.export_graph_path or self.path
self.generate_every = int(getattr(self, 'generate_every', 0) or 0)
self.width = int(getattr(self, 'width', 800) or 800)
self.height = int(getattr(self, 'height', 600) or 600)
@ -120,7 +120,7 @@ class Export(GlancesExport):
x_label_rotation=20,
x_value_formatter=lambda dt: dt.strftime('%Y/%m/%d %H:%M:%S'),
)
for k, v in time_series_subsample(data, self.width).items():
for k, v in iteritems(time_series_subsample(data, self.width)):
chart.add(k, v)
chart.render_to_file(os.path.join(self.path, title + '.svg'))
return True

@ -17,6 +17,8 @@ from influxdb.client import InfluxDBClientError
from glances.exports.export import GlancesExport
from glances.logger import logger
FIELD_TO_TAG = ['name', 'cmdline', 'type']
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
@ -31,22 +33,20 @@ class Export(GlancesExport):
self.db = None
# Optional configuration keys
self.protocol = "http"
self.protocol = 'http'
self.prefix = None
self.tags = None
self.hostname = None
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
"influxdb",
mandatories=["host", "port", "user", "password", "db"],
options=["protocol", "prefix", "tags"],
'influxdb', mandatories=['host', 'port', 'user', 'password', 'db'], options=['protocol', 'prefix', 'tags']
)
if not self.export_enable:
exit("Missing influxdb config")
exit('Missing INFLUXDB version 1 config')
# The hostname is always added as a tag
self.hostname = node().split(".")[0]
self.hostname = node().split('.')[0]
# Init the InfluxDB client
self.client = self.init()
@ -57,7 +57,7 @@ class Export(GlancesExport):
return None
# Correct issue #1530
if self.protocol is not None and (self.protocol.lower() == "https"):
if self.protocol is not None and (self.protocol.lower() == 'https'):
ssl = True
else:
ssl = False
@ -72,7 +72,7 @@ class Export(GlancesExport):
password=self.password,
database=self.db,
)
get_all_db = [i["name"] for i in db.get_list_database()]
get_all_db = [i['name'] for i in db.get_list_database()]
except InfluxDBClientError as e:
logger.critical(f"Cannot connect to InfluxDB database '{self.db}' ({e})")
sys.exit(2)
@ -85,20 +85,76 @@ class Export(GlancesExport):
return db
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exists. If a key exists, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split('.')[0] for k in columns if k.endswith('.key')]
if len(keys_list) == 0:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f'{measurement}.', ''): data_dict[k] for k in data_dict if k.startswith(f'{measurement}.')
}
else:
fields = data_dict
# Transform to InfluxDB data model
# https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_reference/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
if 'key' in fields and fields['key'] in fields:
# Create a tag from the key
# Tag should be an string (see InfluxDB data model)
tags[fields['key']] = str(fields[fields['key']])
# Remove it from the field list (can not be a field and a tag)
fields.pop(fields['key'])
# Add the hostname as a tag
tags['hostname'] = self.hostname
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (can not be a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({'measurement': name, 'tags': tags, 'fields': fields})
return ret
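For reference, a measurement in that shape serialises to a single line of InfluxDB line protocol; a hedged example with hypothetical values:
# {'measurement': 'cpu', 'tags': {'hostname': 'myhost'},
#  'fields': {'total': 27.5, 'user': 15.1}}
# becomes the line-protocol record:
# cpu,hostname=myhost total=27.5,user=15.1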
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + "." + name
name = self.prefix + '.' + name
# Write input to the InfluxDB database
if not points:
if len(points) == 0:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write_points(
self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
self.client.write_points(self._normalize(name, columns, points), time_precision="s")
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")

@ -6,7 +6,7 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""InfluxDB (from to InfluxDB 1.8+ to <3.0) interface class."""
"""InfluxDB (from to InfluxDB 1.8+) interface class."""
import sys
from platform import node
@ -16,6 +16,8 @@ from influxdb_client import InfluxDBClient, WriteOptions
from glances.exports.export import GlancesExport
from glances.logger import logger
FIELD_TO_TAG = ['name', 'cmdline', 'type']
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
@ -30,7 +32,7 @@ class Export(GlancesExport):
self.token = None
# Optional configuration keys
self.protocol = "http"
self.protocol = 'http'
self.prefix = None
self.tags = None
self.hostname = None
@ -38,12 +40,12 @@ class Export(GlancesExport):
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
"influxdb2",
mandatories=["host", "port", "user", "password", "org", "bucket", "token"],
options=["protocol", "prefix", "tags", "interval"],
'influxdb2',
mandatories=['host', 'port', 'user', 'password', 'org', 'bucket', 'token'],
options=['protocol', 'prefix', 'tags', 'interval'],
)
if not self.export_enable:
exit("Missing influxdb2 config")
exit('Missing influxdb2 config')
# Interval between two exports (in seconds)
if self.interval is None:
@ -58,7 +60,7 @@ class Export(GlancesExport):
logger.debug(f"InfluxDB export interval is set to {self.interval} seconds")
# The hostname is always added as a tag
self.hostname = node().split(".")[0]
self.hostname = node().split('.')[0]
# Init the InfluxDB client
self.client = self.init()
@ -68,16 +70,10 @@ class Export(GlancesExport):
if not self.export_enable:
return None
url = f"{self.protocol}://{self.host}:{self.port}"
url = f'{self.protocol}://{self.host}:{self.port}'
try:
# See docs: https://influxdb-client.readthedocs.io/en/stable/api.html#influxdbclient
client = InfluxDBClient(
url=url,
enable_gzip=False,
verify_ssl=False,
org=self.org,
token=self.token,
)
client = InfluxDBClient(url=url, enable_gzip=False, verify_ssl=False, org=self.org, token=self.token)
except Exception as e:
logger.critical(f"Cannot connect to InfluxDB server '{url}' ({e})")
sys.exit(2)
@ -97,22 +93,76 @@ class Export(GlancesExport):
)
)
def _normalize(self, name, columns, points):
"""Normalize data for the InfluxDB's data model.
:return: a list of measurements.
"""
ret = []
# Build initial dict by crossing columns and point
data_dict = dict(zip(columns, points))
# issue1871 - Check if a key exists. If a key exists, the value of
# the key should be used as a tag to identify the measurement.
keys_list = [k.split('.')[0] for k in columns if k.endswith('.key')]
if len(keys_list) == 0:
keys_list = [None]
for measurement in keys_list:
# Manage field
if measurement is not None:
fields = {
k.replace(f'{measurement}.', ''): data_dict[k] for k in data_dict if k.startswith(f'{measurement}.')
}
else:
fields = data_dict
# Transform to InfluxDB datamodel
# https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/
for k in fields:
# Do not export empty (None) value
if fields[k] is None:
continue
# Convert numerical to float
try:
fields[k] = float(fields[k])
except (TypeError, ValueError):
# Convert others to string
try:
fields[k] = str(fields[k])
except (TypeError, ValueError):
pass
# Manage tags
tags = self.parse_tags(self.tags)
if 'key' in fields and fields['key'] in fields:
# Create a tag from the key
# Tag should be a string (see InfluxDB data model)
tags[fields['key']] = str(fields[fields['key']])
# Remove it from the field list (can not be a field and a tag)
fields.pop(fields['key'])
# Add the hostname as a tag
tags['hostname'] = self.hostname
# Add name as a tag (example for the process list)
for k in FIELD_TO_TAG:
if k in fields:
tags[k] = str(fields[k])
# Remove it from the field list (can not be a field and a tag)
fields.pop(k)
# Add the measurement to the list
ret.append({'measurement': name, 'tags': tags, 'fields': fields})
return ret
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + "." + name
name = self.prefix + '.' + name
# Write input to the InfluxDB database
if not points:
if len(points) == 0:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write(
self.bucket,
self.org,
self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
self.client.write(self.bucket, self.org, self._normalize(name, columns, points), time_precision="s")
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")

@ -1,98 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""InfluxDB (for InfluxDB 3.x) interface class."""
import sys
from platform import node
from influxdb_client_3 import InfluxDBClient3
from glances.exports.export import GlancesExport
from glances.logger import logger
class Export(GlancesExport):
"""This class manages the InfluxDB export module."""
def __init__(self, config=None, args=None):
"""Init the InfluxDB export IF."""
super().__init__(config=config, args=args)
# Mandatory configuration keys (additional to host and port)
self.host = None
self.port = None
self.org = None
self.database = None
self.token = None
# Optional configuration keys
self.prefix = None
self.tags = None
self.hostname = None
# Load the InfluxDB configuration file
self.export_enable = self.load_conf(
"influxdb3",
mandatories=["host", "port", "org", "database", "token"],
options=["prefix", "tags"],
)
if not self.export_enable:
exit("Missing influxdb3 config")
# The hostname is always added as a tag
self.hostname = node().split(".")[0]
# Init the InfluxDB client
self.client = self.init()
def init(self):
"""Init the connection to the InfluxDB server."""
if not self.export_enable:
return None
try:
db = InfluxDBClient3(
host=self.host,
org=self.org,
database=self.database,
token=self.token,
)
except Exception as e:
logger.critical(f"Cannot connect to InfluxDB database '{self.database}' ({e})")
sys.exit(2)
if self.database == db._database:
logger.info(
f"Stats will be exported to InfluxDB server {self.host}:{self.port} in {self.database} database"
)
else:
logger.critical(f"InfluxDB database '{self.database}' did not exist. Please create it")
sys.exit(2)
return db
def export(self, name, columns, points):
"""Write the points to the InfluxDB server."""
# Manage prefix
if self.prefix is not None:
name = self.prefix + "." + name
# Write input to the InfluxDB database
if not points:
logger.debug(f"Cannot export empty {name} stats to InfluxDB")
else:
try:
self.client.write(
record=self.normalize_for_influxdb(name, columns, points),
time_precision="s",
)
except Exception as e:
# Log level set to warning instead of error (see: issue #1561)
logger.warning(f"Cannot export {name} stats to InfluxDB ({e})")
else:
logger.debug(f"Export {name} stats to InfluxDB")

@ -48,10 +48,7 @@ class Export(GlancesExport):
# Export stats to JSON file
with open(self.json_filename, "wb") as self.json_file:
try:
self.json_file.write(json_dumps(self.buffer) + b'\n')
except Exception as e:
logger.error(f'Can not export data to JSON ({e})')
self.json_file.write(json_dumps(self.buffer) + b'\n')
# Reset buffer
self.buffer = {}
