Compare commits

..

No commits in common. "develop" and "v4.3.2" have entirely different histories.

223 changed files with 16765 additions and 23568 deletions

View File

@ -8,8 +8,10 @@
!/glances/outputs/static
# Include Requirements files
!/all-requirements.txt
!/requirements.txt
!/docker-requirements.txt
!/webui-requirements.txt
!/optional-requirements.txt
# Include Config file
!/docker-compose/glances.conf
@ -17,6 +19,3 @@
# Include Binary file
!/docker-bin.sh
# Include TOML file
!/pyproject.toml

View File

@ -12,9 +12,9 @@ jobs:
if: github.event_name == 'push'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install pypa/build
@ -45,7 +45,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
@ -54,7 +54,6 @@ jobs:
with:
skip-existing: true
attestations: false
print-hash: true
pypi_test:
name: Publish Python 🐍 distribution 📦 to TestPyPI
@ -70,7 +69,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/

View File

@ -4,7 +4,7 @@ name: build_docker
env:
DEFAULT_DOCKER_IMAGE: nicolargo/glances
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
PUSH_BRANCH: ${{ 'refs/heads/develop' == github.ref || 'refs/heads/master' == github.ref || startsWith(github.ref, 'refs/tags/v') }}
# Alpine image platform: https://hub.docker.com/_/alpine
# linux/arm/v6,linux/arm/v7 do not work (timeout during the build)
DOCKER_PLATFORMS: linux/amd64,linux/arm64/v8
@ -36,11 +36,15 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/v}
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"${VERSION}\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"${VERSION}-full\" }]"
elif [[ $GITHUB_REF == refs/heads/develop ]]; then
TAG_ARRAY="[{ \"target\": \"dev\", \"tag\": \"dev\" }]"
elif [[ $GITHUB_REF == refs/heads/master ]]; then
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
elif [[ $GITHUB_REF == refs/heads/main ]]; then
TAG_ARRAY="[{ \"target\": \"minimal\", \"tag\": \"latest\" },"
TAG_ARRAY="$TAG_ARRAY { \"target\": \"full\", \"tag\": \"latest-full\" }]"
else
TAG_ARRAY="[]"
fi
@ -59,7 +63,7 @@ jobs:
tag: ${{ fromJson(needs.create_docker_images_list.outputs.tags) }}
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Retrieve Repository Docker metadata
id: docker_meta

View File

@ -11,7 +11,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Run Trivy vulnerability scanner in repo mode
uses: aquasecurity/trivy-action@master

View File

@ -10,7 +10,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v10
- uses: actions/stale@v9
with:
days-before-issue-stale: 90
days-before-issue-close: -1

View File

@ -22,7 +22,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Check formatting with Ruff
uses: chartboost/ruff-action@v1
@ -37,16 +37,14 @@ jobs:
runs-on: ubuntu-24.04
strategy:
matrix:
# Python EOL versions are not tested
# Multiple Python version only tested for Linux
python-version: ["3.10", "3.11", "3.12", "3.13", "3.14"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -60,6 +58,11 @@ jobs:
run: |
python -m pytest ./tests/test_core.py
# Error appear with h11, not related to Glances
# Should be tested if correction is done
# Installed c:\hostedtoolcache\windows\python\3.9.13\x64\lib\site-packages\exceptiongroup-1.2.1-py3.9.egg
# error: h11 0.14.0 is installed but h11<0.13,>=0.11 is required by {'httpcore'}
# Error: Process completed with exit code 1.
test-windows:
needs: source-code-checks
@ -67,15 +70,14 @@ jobs:
runs-on: windows-2025
strategy:
matrix:
# Windows-curses not available for Python 3.14 for the moment
# See https://github.com/zephyrproject-rtos/windows-curses/issues/76
python-version: ["3.13"]
# Windows-curses not available for Python 3.13 for the moment
python-version: ["3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -94,18 +96,18 @@ jobs:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: macos-15
runs-on: macos-14
strategy:
matrix:
# Only test the latest stable version
python-version: ["3.14"]
python-version: ["3.13"]
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'

View File

@ -14,9 +14,9 @@ jobs:
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Glances will be build with Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v5
uses: actions/setup-node@v4
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'

10
.gitignore vendored
View File

@ -23,7 +23,6 @@ local.properties
.classpath
.settings/
.loadpath
.ipynb_checkpoints/
# External tool builders
.externalToolBuilders/
@ -64,14 +63,7 @@ bower_components/
/*_source.tar.bz2
# Virtual env
.venv-uv/
.venv/
uv.lock
.python-version
/venv*/
# Test
.coverage
tests-data/issues/*/config/
# Local SSL certificates
glances.local*.pem

View File

@ -1,107 +1,22 @@
repos:
- repo: https://github.com/gitleaks/gitleaks
rev: v8.24.2
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: gitleaks
name: "🔒 security · Detect hardcoded secrets"
- id: check-ast
- id: check-docstring-first
- id: check-json
- id: check-merge-conflict
- id: check-shebang-scripts-are-executable
- id: check-toml
- id: check-yaml
- id: debug-statements
- id: detect-private-key
- id: mixed-line-ending
- id: requirements-txt-fixer
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.10
rev: v0.11.5
hooks:
- id: ruff-format
name: "🐍 python · Formatter with Ruff"
types_or: [ python, pyi ]
args: [ --config, './pyproject.toml' ]
- id: ruff-check
name: "🐍 python · Linter with Ruff"
types_or: [ python, pyi ]
args: [ --fix, --exit-non-zero-on-fix, --config, './pyproject.toml' ]
# - repo: https://github.com/RobertCraigie/pyright-python
# rev: v1.1.391
# hooks:
# - id: pyright
# name: "🐍 python · Check types"
# - repo: https://github.com/biomejs/pre-commit
# rev: "v2.3.7"
# hooks:
# - id: biome-check
# name: "🟨 javascript · Lint, format, and safe fixes with Biome"
- repo: https://github.com/python-jsonschema/check-jsonschema
rev: 0.35.0
hooks:
- id: check-github-workflows
name: "🐙 github-actions · Validate gh workflow files"
args: ["--verbose"]
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.11.0.1
hooks:
- id: shellcheck
name: "🐚 shell · Lint shell scripts"
- repo: https://github.com/openstack/bashate
rev: 2.1.1
hooks:
- id: bashate
name: "🐚 shell · Check shell script code style"
entry: bashate --error . --ignore=E006
# - repo: https://github.com/mrtazz/checkmake.git
# rev: 0.2.2
# hooks:
# - id: checkmake
# name: "🐮 Makefile · Lint Makefile"
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-executables-have-shebangs
name: "📁 filesystem/⚙️ exec · Verify shebang presence"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Verify script permissions"
- id: check-case-conflict
name: "📁 filesystem/📝 names · Check case sensitivity"
- id: destroyed-symlinks
name: "📁 filesystem/🔗 symlink · Detect broken symlinks"
- id: check-merge-conflict
name: "🌳 git · Detect conflict markers"
- id: forbid-new-submodules
name: "🌳 git · Prevent submodule creation"
- id: no-commit-to-branch
name: "🌳 git · Protect main branches"
args: ["--branch", "main", "--branch", "master"]
- id: check-added-large-files
name: "🌳 git · Block large file commits"
args: ['--maxkb=5000']
- id: check-ast
name: "🐍 python/🔍 quality · Validate Python AST"
- id: check-docstring-first
name: "🐍 python/📝 style · Enforce docstring at top"
- id: check-json
name: "📄 formats/json · Validate JSON files"
- id: check-shebang-scripts-are-executable
name: "📁 filesystem/⚙️ exec · Ensure scripts are executable"
- id: check-toml
name: "📄 formats/toml · Validate TOML files"
- id: check-yaml
name: "📄 formats/yaml · Validate YAML syntax"
- id: debug-statements
name: "🐍 python/🪲 debug · Detect debug statements"
- id: detect-private-key
name: "🔐 security · Detect private keys"
- id: mixed-line-ending
name: "📄 text/↩️ newline · Normalize line endings"
- id: requirements-txt-fixer
name: "🐍 python/📦 deps · Sort requirements.txt"
- repo: local
hooks:
- id: find-duplicate-lines
name: "❗local script · Find duplicate lines at the end of file"
entry: bash tests-data/tools/find-duplicate-lines.sh
language: system
types: [python]
pass_filenames: false
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]

View File

@ -31,4 +31,4 @@ sphinx:
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: dev-requirements.txt
- requirements: doc-requirements.txt

View File

@ -3,12 +3,9 @@ include CONTRIBUTING.md
include COPYING
include NEWS.rst
include README.rst
include README-pypi.rst
include SECURITY.md
include conf/glances.conf
include conf/fetch-templates/*.jinja
include requirements.txt
include all-requirements.txt
recursive-include docs *
recursive-include glances *.py
recursive-include glances/outputs/static *

241
Makefile
View File

@ -1,6 +1,17 @@
PORT ?= 8008
CONF := conf/glances.conf
LASTTAG = $(shell git describe --tags --abbrev=0)
PORT ?= 8008
venv_full:= venv/bin
venv_min := venv-min/bin
CONF := conf/glances.conf
PIP := $(venv_full)/pip
PYTHON := $(venv_full)/python
PYTEST := $(venv_full)/python -m pytest
LASTTAG = $(shell git describe --tags --abbrev=0)
VENV_TYPES := full min
VENV_PYTHON := $(VENV_TYPES:%=venv-%-python)
VENV_UPG := $(VENV_TYPES:%=venv-%-upgrade)
VENV_DEPS := $(VENV_TYPES:%=venv-%)
VENV_INST_UPG := $(VENV_DEPS) $(VENV_UPG)
IMAGES_TYPES := full minimal
DISTROS := alpine ubuntu
@ -15,110 +26,94 @@ PODMAN_SOCK ?= /run/user/$(shell id -u)/podman/podman.sock
DOCKER_SOCK ?= /var/run/docker.sock
DOCKER_SOCKS := -v $(PODMAN_SOCK):$(PODMAN_SOCK):ro -v $(DOCKER_SOCK):$(DOCKER_SOCK):ro
DOCKER_OPTS := --rm -e TZ="${TZ}" -e GLANCES_OPT="" --pid host --network host
UV_RUN := .venv-uv/bin/uv
# if the command is only `make`, the default tasks will be the printing of the help.
.DEFAULT_GOAL := help
.PHONY: help test docs docs-server venv requirements profiling docker all clean all test
.PHONY: help test docs docs-server venv venv-min
help: ## List all make commands available
@grep -E '^[\.a-zA-Z_%-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk -F ":" '{print $1}' | \
grep -v % | sed 's/\\//g' | sort | \
grep -v % | \
sed 's/\\//g' | \
sort | \
awk 'BEGIN {FS = ":[^:]*?##"}; {printf "\033[1;34mmake %-50s\033[0m %s\n", $$1, $$2}'
# ===================================================================
# Virtualenv
# ===================================================================
# install-uv: ## Instructions to install the UV tool
# @echo "Install the UV tool (https://astral.sh/uv/)"
# @echo "Please install the UV tool manually"
# @echo "For example with: curl -LsSf https://astral.sh/uv/install.sh | sh"
# @echo "Or via a package manager of your distribution"
# @echo "For example for Snap: snap install astral-uv"
venv-%-upgrade: UPGRADE = --upgrade
install-uv: ## Install UV tool in a specific virtualenv
python3 -m venv .venv-uv
.venv-uv/bin/pip install uv
define DEFINE_VARS_FOR_TYPE
venv-$(TYPE) venv-$(TYPE)-upgrade: VIRTUAL_ENV = $(venv_$(TYPE))
endef
upgrade-uv: ## Upgrade the UV tool
.venv-uv/bin/pip install --upgrade pip
.venv-uv/bin/pip install --upgrade uv
$(foreach TYPE,$(VENV_TYPES),$(eval $(DEFINE_VARS_FOR_TYPE)))
venv: ## Create the virtualenv with all dependencies
$(UV_RUN) sync --all-extras --no-group dev
$(VENV_PYTHON): venv-%-python:
virtualenv -p python3 $(if $(filter full,$*),venv,venv-$*)
venv-upgrade venv-switch-to-full: ## Upgrade the virtualenv with all dependencies
$(UV_RUN) sync --upgrade --all-extras
$(VENV_INST_UPG): venv-%:
$(if $(UPGRADE),$(VIRTUAL_ENV)/pip install --upgrade pip,)
$(foreach REQ,$(REQS), $(VIRTUAL_ENV)/pip install $(UPGRADE) -r $(REQ);)
$(if $(PRE_COMMIT),$(VIRTUAL_ENV)/pre-commit install --hook-type pre-commit,)
venv-min: ## Create the virtualenv with minimal dependencies
$(UV_RUN) sync
venv-python: $(VENV_PYTHON) ## Install all Python 3 venv
venv: $(VENV_DEPS) ## Install all Python 3 dependencies
venv-upgrade: $(VENV_UPG) ## Upgrade all Python 3 dependencies
venv-upgrade-min venv-switch-to-min: ## Upgrade the virtualenv with minimal dependencies
$(UV_RUN) sync --upgrade
# For full installation (with optional dependencies)
venv-clean: ## Remove the virtualenv
rm -rf .venv
venv-full venv-full-upgrade: REQS = requirements.txt optional-requirements.txt dev-requirements.txt doc-requirements.txt
venv-dev: ## Create the virtualenv with dev dependencies
$(UV_RUN) sync --dev --all-extras
$(UV_RUN) run pre-commit install --hook-type pre-commit
venv-full-python: ## Install Python 3 venv
venv-full: venv-python ## Install Python 3 run-time
venv-full-upgrade: ## Upgrade Python 3 run-time dependencies
venv-full: PRE_COMMIT = 1
# ===================================================================
# Requirements
#
# Note: the --no-hashes option should be used because pip (in CI) has
# issues with hashes.
# ===================================================================
# For minimal installation (without optional dependencies)
requirements-min: ## Generate the requirements.txt files (minimal dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --output-file requirements.txt
venv-min venv-min-upgrade: REQS = requirements.txt dev-requirements.txt doc-requirements.txt
requirements-all: ## Generate the all-requirements.txt files (all dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
requirements-docker: ## Generate the docker-requirements.txt files (Docker specific dependencies)
$(UV_RUN) export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
requirements-dev: ## Generate the dev-requirements.txt files (dev dependencies)
$(UV_RUN) export --no-hashes --only-dev --output-file dev-requirements.txt
requirements: requirements-min requirements-all requirements-dev requirements-docker ## Generate all the requirements files
requirements-upgrade: venv-upgrade requirements ## Upgrade the virtualenv and regenerate all the requirements files
venv-min-python: ## Install Python 3 venv minimal
venv-min: venv-min-python ## Install Python 3 minimal run-time dependencies
venv-min-upgrade: ## Upgrade Python 3 minimal run-time dependencies
# ===================================================================
# Tests
# ===================================================================
test: ## Run All unit tests
$(UV_RUN) run pytest
$(PYTEST)
test-core: ## Run Core unit tests
$(UV_RUN) run pytest tests/test_core.py
test-api: ## Run API unit tests
$(UV_RUN) run pytest tests/test_api.py
$(PYTEST) tests/test_core.py
test-memoryleak: ## Run Memory-leak unit tests
$(UV_RUN) run pytest tests/test_memoryleak.py
$(PYTEST) tests/test_memoryleak.py
test-perf: ## Run Perf unit tests
$(UV_RUN) run pytest tests/test_perf.py
$(PYTEST) tests/test_perf.py
test-restful: ## Run Restful API unit tests
$(UV_RUN) run pytest tests/test_restful.py
$(PYTEST) tests/test_restful.py
test-webui: ## Run WebUI unit tests
$(UV_RUN) run pytest tests/test_webui.py
$(PYTEST) tests/test_webui.py
test-xmlrpc: ## Run XMLRPC API unit tests
$(UV_RUN) run pytest tests/test_xmlrpc.py
$(PYTEST) tests/test_xmlrpc.py
test-with-upgrade: venv-upgrade test ## Upgrade deps and run unit tests
test-min: ## Run core unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-min-with-upgrade: venv-min-upgrade ## Upgrade deps and run unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-export-csv: ## Run interface tests with CSV
/bin/bash ./tests/test_export_csv.sh
@ -134,36 +129,26 @@ test-export-influxdb-v3: ## Run interface tests with InfluxDB version 3 (Core)
test-export-timescaledb: ## Run interface tests with TimescaleDB
/bin/bash ./tests/test_export_timescaledb.sh
test-export-nats: ## Run interface tests with NATS
/bin/bash ./tests/test_export_nats.sh
test-exports: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb test-export-nats ## Tests all exports
test-export: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb## Tests all exports
# ===================================================================
# Linters, profilers and cyber security
# ===================================================================
pre-commit: ## Run pre-commit hooks
$(UV_RUN) run pre-commit run --all-files
find-duplicate-lines: ## Search for duplicate lines in files
/bin/bash tests-data/tools/find-duplicate-lines.sh
format: ## Format the code
$(UV_RUN) run ruff format .
$(venv_full)/python -m ruff format .
lint: ## Lint the code.
$(UV_RUN) run ruff check . --fix
$(venv_full)/python -m ruff check . --fix
lint-readme: ## Lint the main README.rst file
$(UV_RUN) run rstcheck README.rst
$(UV_RUN) run rstcheck README-pypi.rst
$(venv_full)/python -m rstcheck README.rst
codespell: ## Run codespell to fix common misspellings in text files
$(UV_RUN) run codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
$(venv_full)/codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
semgrep: ## Run semgrep to find bugs and enforce code standards
$(UV_RUN) run semgrep scan --config=auto
$(venv_full)/semgrep scan --config=auto
profiling-%: SLEEP = 3
profiling-%: TIMES = 30
@ -177,27 +162,27 @@ endef
profiling-gprof: CPROF = glances.cprof
profiling-gprof: ## Callgraph profiling (need "apt install graphviz")
$(DISPLAY-BANNER)
$(UV_RUN) run python -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(UV_RUN) run gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
$(PYTHON) -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(venv_full)/gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
rm -f $(CPROF)
profiling-pyinstrument: ## PyInstrument profiling
$(DISPLAY-BANNER)
$(UV_RUN) add pyinstrument
$(UV_RUN) run pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
$(PIP) install pyinstrument
$(PYTHON) -m pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
profiling-pyspy: ## Flame profiling
$(DISPLAY-BANNER)
$(UV_RUN) run py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- .venv-uv/bin/uvrun python run-venv.py -C $(CONF) --stop-after $(TIMES)
$(venv_full)/py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- $(PYTHON) run-venv.py -C $(CONF) --stop-after $(TIMES)
profiling: profiling-gprof profiling-pyinstrument profiling-pyspy ## Profiling of the Glances software
trace-malloc: ## Trace the malloc() calls
@echo "Malloc test is running, please wait ~30 secondes..."
$(UV_RUN) run python -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
$(PYTHON) -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
memory-leak: ## Profile memory leaks
$(UV_RUN) run python -m glances -C $(CONF) --memory-leak
$(PYTHON) -m glances -C $(CONF) --memory-leak
memory-profiling: TIMES = 2400
memory-profiling: PROFILE = mprofile_*.dat
@ -206,37 +191,30 @@ memory-profiling: ## Profile memory usage
@echo "It's a very long test (~4 hours)..."
rm -f $(PROFILE)
@echo "1/2 - Start memory profiling with the history option enable"
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
rm -f $(PROFILE)
@echo "2/2 - Start memory profiling with the history option disable"
$(UV_RUN) run mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(UV_RUN) run mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
rm -f $(PROFILE)
# Trivy installation: https://aquasecurity.github.io/trivy/latest/getting-started/installation/
trivy: ## Run Trivy to find vulnerabilities
$(UV_RUN) run trivy fs ./glances/
bandit: ## Run Bandit to find vulnerabilities
$(UV_RUN) run bandit glances -r
trivy: ## Run Trivy to find vulnerabilities in container images
trivy fs .
# ===================================================================
# Docs
# ===================================================================
docs: ## Create the documentation
$(UV_RUN) run python -m glances -C $(CONF) --api-doc > ./docs/api/python.rst
$(UV_RUN) run python ./generate_openapi.py
$(UV_RUN) run python -m glances -C $(CONF) --api-restful-doc > ./docs/api/restful.rst
$(PYTHON) ./generate_openapi.py
$(PYTHON) -m glances -C $(CONF) --api-doc > ./docs/api.rst
cd docs && ./build.sh && cd ..
docs-server: docs ## Start a Web server to serve the documentation
(sleep 2 && sensible-browser "http://localhost:$(PORT)") &
cd docs/_build/html/ && .venv-uv/bin/uvrun python -m http.server $(PORT)
docs-jupyter: ## Start Jupyter Notebook
$(UV_RUN) run --with jupyter jupyter lab
cd docs/_build/html/ && ../../../venv/bin/python -m http.server $(PORT)
release-note: ## Generate release note
git --no-pager log $(LASTTAG)..HEAD --first-parent --pretty=format:"* %s"
@ -253,19 +231,17 @@ install: ## Open a Web Browser to the installation procedure
webui webui%: DIR = glances/outputs/static/
webui-gen-config: ## Generate the Web UI config file
$(UV_RUN) run python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
webui: webui-gen-config ## Build the Web UI
webui: ## Build the Web UI
$(PYTHON) -c 'import json; from glances.outputs.glances_curses import _GlancesCurses; print(json.dumps({ "leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"]}, indent=4))' > ./glances/outputs/static/js/uiconfig.json
cd $(DIR) && npm ci && npm run build
webui-audit: ## Audit the Web UI
cd $(DIR) && npm audit
webui-audit-fix: webui-gen-config ## Fix audit the Web UI
webui-audit-fix: ## Fix audit the Web UI
cd $(DIR) && npm audit fix && npm ci && npm run build
webui-update: webui-gen-config ## Update JS dependencies
webui-update: ## Update JS dependencies
cd $(DIR) && npm update --save && npm ci && npm run build
# ===================================================================
@ -274,7 +250,7 @@ webui-update: webui-gen-config ## Update JS dependencies
flatpak: venv-upgrade ## Generate FlatPack JSON file
git clone https://github.com/flatpak/flatpak-builder-tools.git
$(UV_RUN) run python ./flatpak-builder-tools/pip/flatpak-pip-generator glances
$(PYTHON) ./flatpak-builder-tools/pip/flatpak-pip-generator glances
rm -rf ./flatpak-builder-tools
@echo "Now follow: https://github.com/flathub/flathub/wiki/App-Submission"
@ -308,33 +284,33 @@ docker-ubuntu-full: ## Generate local docker image (Ubuntu full)
docker-ubuntu-minimal: ## Generate local docker image (Ubuntu minimal)
docker-ubuntu-dev: ## Generate local docker image (Ubuntu dev)
trivy-docker: ## Run Trivy to find vulnerabilities in Docker images
$(UV_RUN) run trivy image glances:local-alpine-full
$(UV_RUN) run trivy image glances:local-alpine-minimal
$(UV_RUN) run trivy image glances:local-ubuntu-full
$(UV_RUN) run trivy image glances:local-ubuntu-minimal
# ===================================================================
# Run
# ===================================================================
run: ## Start Glances in console mode (also called standalone)
$(UV_RUN) run python -m glances -C $(CONF)
$(PYTHON) -m glances -C $(CONF)
run-debug: ## Start Glances in debug console mode (also called standalone)
$(UV_RUN) run python -m glances -C $(CONF) -d
$(PYTHON) -m glances -C $(CONF) -d
run-local-conf: ## Start Glances in console mode with the system conf file
$(UV_RUN) run python -m glances
$(PYTHON) -m glances
run-local-conf-hide-public: ## Start Glances in console mode with the system conf file and hide public information
$(UV_RUN) run python -m glances --hide-public-info
$(PYTHON) -m glances --hide-public-info
run-min: ## Start minimal Glances in console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF)
run-min-debug: ## Start minimal Glances in debug console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF) -d
run-min-local-conf: ## Start minimal Glances in console mode with the system conf file
$(venv_min)/python -m glances
run-like-htop: ## Start Glances with the same features than Htop
$(UV_RUN) run python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
run-fetch: ## Start Glances in fetch mode
$(UV_RUN) run python -m glances --fetch
$(venv_min)/python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
$(DOCKER_RUNTIMES): run-docker-%:
$(DOCKER_RUN) $(DOCKER_OPTS) $(DOCKER_SOCKS) -it glances:local-$*
@ -346,35 +322,32 @@ run-docker-ubuntu-minimal: ## Start Glances Ubuntu Docker minimal in console mod
run-docker-ubuntu-full: ## Start Glances Ubuntu Docker full in console mode
run-docker-ubuntu-dev: ## Start Glances Ubuntu Docker dev in console mode
generate-ssl: ## Generate local and self-signed SSL certificates for dev (need mkcert)
mkcert glances.local localhost 120.0.0.1 0.0.0.0
run-webserver: ## Start Glances in Web server mode
$(UV_RUN) run python -m glances -C $(CONF) -w
$(PYTHON) -m glances -C $(CONF) -w
run-webserver-local-conf: ## Start Glances in Web server mode with the system conf file
$(UV_RUN) run python -m glances -w
$(PYTHON) -m glances -w
run-webserver-local-conf-hide-public: ## Start Glances in Web server mode with the system conf file and hide public info
$(UV_RUN) run python -m glances -w --hide-public-info
$(PYTHON) -m glances -w --hide-public-info
run-restapiserver: ## Start Glances in REST API server mode
$(UV_RUN) run python -m glances -C $(CONF) -w --disable-webui
$(PYTHON) -m glances -C $(CONF) -w --disable-webui
run-server: ## Start Glances in server mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) -s
$(PYTHON) -m glances -C $(CONF) -s
run-client: ## Start Glances in client mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) -c localhost
$(PYTHON) -m glances -C $(CONF) -c localhost
run-browser: ## Start Glances in browser mode (RPC)
$(UV_RUN) run python -m glances -C $(CONF) --browser
$(PYTHON) -m glances -C $(CONF) --browser
run-web-browser: ## Start Web Central Browser
$(UV_RUN) run python -m glances -C $(CONF) -w --browser
$(PYTHON) -m glances -C $(CONF) -w --browser
run-issue: ## Start Glances in issue mode
$(UV_RUN) run python -m glances -C $(CONF) --issue
$(PYTHON) -m glances -C $(CONF) --issue
run-multipass: ## Install and start Glances in a VM (only available on Ubuntu with multipass already installed)
multipass launch -n glances-on-lts lts
@ -384,4 +357,4 @@ run-multipass: ## Install and start Glances in a VM (only available on Ubuntu wi
multipass delete glances-on-lts
show-version: ## Show Glances version number
$(UV_RUN) run python -m glances -C $(CONF) -V
$(PYTHON) -m glances -C $(CONF) -V

117
NEWS.rst
View File

@ -1,110 +1,11 @@
==============================================================================
Glances ChangeLog
==============================================================================
=============
Version 4.4.1
=============
Bug corrected:
* Restful API issue after a while (stats are no more updated) #3333
=============
Version 4.4.0
=============
Breaking changes:
* A new Python API is now available to use Glances as a Python lib in your own development #3237
* In the process list, the long command line is now truncated by default. Use the arrow keys to show the full command line. SHIFT + arrow keys are used to switch between column sorts (TUI).
* Prometheus export format is now more user friendly (see detail in #3283)
Enhancements:
* Make a Glances API in order to use Glances as a Python lib #3237
* Add a new --fetch (neofetch like) option to display a snapshot of the current system status #3281
* Show used port in container section #2054
* Show long command line with arrow key #1553
* Sensors plugin refresh by default every 10 seconds
* Do not call update if a call is done to a specific plugin through the API #3033
* [UI] Process virtual memory display can be disable by configuration #3299
* Choose between used or available in the mem plugin #3288
* [Experimental] Add export to DuckDB database #3205
* Add Disk I/O Latency stats #1070
* Filter fields to export #3258
* Remove .keys() from loops over dicts #3253
* Remove iterator helpers #3252
Bug corrected:
* [MACOS] Glances not showing Processes on MacOS #3100
* Last dev build broke Homepage API calls ? only 1 widget still working #3322
* Cloud plugin always generate communication with 169.254.169.254, even if the plugin is disabled #3316
* API response delay (3+ minutes) when VMs are running #3317
* [WINDOWS] Glances do not display CPU stat correctly #3155
* Glances hangs if network device (NFS) is no available #3290
* Fix prometheus export format #3283
* Issue #3279 zfs cache and memory math issues #3289
* [MACOS] Glances crashes when I try to filter #3266
* Glances hang when killing process with multiple CTRL-C #3264
* Issues after disabling system and processcount plugins #3248
* Headers missing from predefined fields in TUI browser machine list #3250
* Add another check for the famous Netifaces issue - Related to #3219
* Key error 'type' in server_list_static.py (load_server_list) #3247
Continuous integration and documentation:
* Glances now use uv for the dev environment #3025
* Glances is compatible with Python 3.14 #3319
* Glances provides requirements files with specific versions for each release
* Requirements files are now generated dynamically with the make requirements or requirements-upgrade target
* Add duplicate line check in pre-commit (strange behavior with some VScode extension)
* Solve issue with multiprocessing exception with Snap package
* Add a test script to identify CPU consumption of sensor plugin
* Refactor port to take into account netifaces2
* Correct issue with Chrome driver in WebUI unit test
* Upgrade export test with InfluxDB 1.12
* Fix typo of --export-process-filter help message #3314
* In the outdated feature, catch error message if Pypi server not reachable
* Add unit test for auto_unit
* Label error in docs #3286
* Put WebUI conf generator in a dedicated script
* Refactor the Makefile to generate WebUI config file for all webui targets
* Update sensors documentation #3275
* Update docker compose env quote #3273
* Update docker-compose.yml #3249
* Update API doc generation
* Update README with nice icons #3236
* Add documentation for WebUI test
Thanks to all contributors and bug reporters !
Special thanks to:
- Adi
- Bennett Kanuka
- Tim Potter
- Ariel Otilibili
- Boris Okassa
- Lawrence
- Shohei YOSHIDA
- jmwallach
- korn3r
============================================================================
=============
Version 4.3.3
=============
Bug corrected:
* Something in 4.3.2 broke the home assistant add-on for Glances #3238
Thanks to the FastAPI and Home Assistant community for the support.
=============
Version 4.3.2
=============
Enhancements:
* Add stats about running VMS (qemu/libvirt/kvm support through virsh) #1531
@ -513,7 +414,7 @@ See release note in Wiki format: https://github.com/nicolargo/glances/wiki/Glanc
**BREAKING CHANGES:**
* The minimal Python version is 3.8
* The Glances API version 3 is replaced by the version 4. So Restful API URL is now /api/4/ #2610
* The Glances API version 3 is replaced by the version 4. So Restfull API URL is now /api/4/ #2610
* Alias definition change in the configuration file #1735
Glances version 3.x and lower:
@ -538,9 +439,9 @@ Minimal requirements for Glances version 4 are:
* packaging
* ujson
* pydantic
* fastapi (for WebUI / RestFul API)
* uvicorn (for WebUI / RestFul API)
* jinja2 (for WebUI / RestFul API)
* fastapi (for WebUI / RestFull API)
* uvicorn (for WebUI / RestFull API)
* jinja2 (for WebUI / RestFull API)
Majors changes between Glances version 3 and version 4:
@ -600,7 +501,7 @@ Bug corrected:
CI and documentation:
* New logo for Glances version 4.0 #2713
* Update api-restful.rst documentation #2496
* Update api.rst documentation #2496
* Change Renovate config #2729
* Docker compose password unrecognized arguments when applying docs #2698
* Docker includes OS Release Volume mount info #2473
@ -978,7 +879,7 @@ Bugs corrected:
* Threading.Event.isSet is deprecated in Python 3.10 #2017
* Fix code scanning alert - Clear-text logging of sensitive information security #2006
* The gpu temperature unit are displayed incorrectly in web ui bug #2002
* Doc for 'alert' Restful/JSON API response documentation #1994
* Doc for 'alert' Restfull/JSON API response documentation #1994
* Show the spinning state of a disk documentation #1993
* Web server status check endpoint enhancement #1988
* --time parameter being ignored for client/server mode bug #1978
@ -1073,7 +974,7 @@ Bugs corrected:
* [3.2.0/3.2.1] keybinding not working anymore #1904
* InfluxDB/InfluxDB2 Export object has no attribute hostname #1899
Documentation: The "make docs" generate RestFul/API documentation file.
Documentation: The "make docs" generate RestFull/API documentation file.
===============
Version 3.2.1
@ -2100,7 +2001,7 @@ Version 2.1
* Add Glances log message (in the /tmp/glances.log file)
The default log level is INFO, you can switch to the DEBUG mode using the -d option on the command line.
* Add RESTful API to the Web server mode
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
* Improve SNMP fallback mode for Cisco IOS, VMware ESXi
* Add --theme-white feature to optimize display for white background
* Experimental history feature (--enable-history option on the command line)

View File

@ -1,385 +0,0 @@
Glances 🌟
==========
**Glances** is an open-source system cross-platform monitoring tool.
It allows real-time monitoring of various aspects of your system such as
CPU, memory, disk, network usage etc. It also allows monitoring of running processes,
logged in users, temperatures, voltages, fan speeds etc.
It also supports container monitoring, it supports different container management
systems such as Docker, LXC. The information is presented in an easy to read dashboard
and can also be used for remote monitoring of systems via a web interface or command
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage 👋
========
For the standalone mode, just run:
.. code-block:: console
$ glances
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/glances-responsive-webdesign.png
For the Web server mode, run:
.. code-block:: console
$ glances -w
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
In this mode, an HTTP/Restful API is exposed, see document `RestfulApi`_ for more details.
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-web.png
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
.. code-block:: console
$ glances -s
and this one on the client:
.. code-block:: console
$ glances -c <ip>
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is possible to display raw stats on stdout:
.. code-block:: console
$ glances --stdout cpu.user,mem.used,load
cpu.user: 30.7
mem.used: 3278204928
load: {'cpucore': 4, 'min1': 0.21, 'min5': 0.4, 'min15': 0.27}
cpu.user: 3.4
mem.used: 3275251712
load: {'cpucore': 4, 'min1': 0.19, 'min5': 0.39, 'min15': 0.27}
...
or in a CSV format thanks to the stdout-csv option:
.. code-block:: console
$ glances --stdout-csv now,cpu.user,mem.used,load
now,cpu.user,mem.used,load.cpucore,load.min1,load.min5,load.min15
2018-12-08 22:04:20 CEST,7.3,5948149760,4,1.04,0.99,1.04
2018-12-08 22:04:23 CEST,5.4,5949136896,4,1.04,0.99,1.04
...
or in a JSON format thanks to the stdout-json option (attribute not supported in this mode in order to have a real JSON object in output):
.. code-block:: console
$ glances --stdout-json cpu,mem
cpu: {"total": 29.0, "user": 24.7, "nice": 0.0, "system": 3.8, "idle": 71.4, "iowait": 0.0, "irq": 0.0, "softirq": 0.0, "steal": 0.0, "guest": 0.0, "guest_nice": 0.0, "time_since_update": 1, "cpucore": 4, "ctx_switches": 0, "interrupts": 0, "soft_interrupts": 0, "syscalls": 0}
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look of a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: https://github.com/nicolargo/glances/raw/refs/heads/master/docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu["total"]
7.3
>>> gl.mem["used"]
12498582144
>>> gl.auto_unit(gl.mem["used"])
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network["wlp0s20f3"]
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
For a complete example of how to use Glances as a library, have a look to the `PythonApi`_.
Documentation 📜
================
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM! and the `FAQ`_), please post it on the official Reddit `forum`_ or in GitHub `Discussions`_.
Gateway to other services 🌐
============================
Glances can export stats to:
- ``CSV`` file
- ``JSON`` file
- ``InfluxDB`` server
- ``Cassandra`` server
- ``CouchDB`` server
- ``OpenTSDB`` server
- ``Prometheus`` server
- ``StatsD`` server
- ``ElasticSearch`` server
- ``PostgreSQL/TimeScale`` server
- ``RabbitMQ/ActiveMQ`` broker
- ``ZeroMQ`` broker
- ``Kafka`` broker
- ``Riemann`` server
- ``Graphite`` server
- ``RESTful`` endpoint
Installation 🚀
===============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
To install Glances, simply use the ``pip`` command line.
Warning: on modern Linux operating systems, you may have an externally-managed-environment
error message when you try to use ``pip``. In this case, go to the PipX section below.
.. code-block:: console
pip install --user glances
*Note*: Python headers are required to install `psutil`_, a Glances
dependency. For example, on Debian/Ubuntu **the simplest** is
``apt install python3-psutil`` or alternatively need to install first
the *python-dev* package and gcc (*python-devel* on Fedora/CentOS/RHEL).
For Windows, just install psutil from the binary installation file.
By default, Glances is installed **without** the Web interface dependencies.
To install it, use the following command:
.. code-block:: console
pip install --user 'glances[web]'
For a full installation (with all features, see features list below):
.. code-block:: console
pip install --user 'glances[all]'
Features list:
- all: install dependencies for all features
- action: install dependencies for action feature
- browser: install dependencies for Glances central browser
- cloud: install dependencies for cloud plugin
- containers: install dependencies for container plugin
- export: install dependencies for all exports modules
- gpu: install dependencies for GPU plugin
- graph: install dependencies for graph export
- ip: install dependencies for IP public option
- raid: install dependencies for RAID plugin
- sensors: install dependencies for sensors plugin
- smart: install dependencies for smart plugin
- snmp: install dependencies for SNMP
- sparklines: install dependencies for sparklines option
- web: install dependencies for Webserver (WebUI) and Web API
- wifi: install dependencies for Wifi plugin
To upgrade Glances to the latest version:
.. code-block:: console
pip install --user --upgrade glances
The current develop branch is published to the test.pypi.org package index.
If you want to test the develop version (could be unstable), enter:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ Glances
PyPI: PipX, the alternative way
-------------------------------
Install PipX on your system (apt install pipx on Ubuntu).
Install Glances (with all features):
.. code-block:: console
pipx install 'glances[all]'
The glances script will be installed in the ~/.local/bin folder.
Shell tab completion 🔍
=======================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
For example, on a Linux operating system with bash shell:
.. code-block:: console
$ mkdir -p ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion
$ glances --print-completion bash > ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
$ source ${XDG_DATA_HOME:="$HOME/.local/share"}/bash-completion/glances
Following shells are supported: bash, zsh and tcsh.
Requirements 🧩
===============
Glances is developed in Python. A minimal Python version 3.10 or higher
should be installed on your system.
*Note for Python 2 users*
Glances version 4 or higher does not support Python 2 (and Python 3 < 3.10).
Please use Glances version 3.4.x if you need Python 2 support.
Dependencies:
- ``psutil`` (better with latest version)
- ``defusedxml`` (in order to monkey patch xmlrpc)
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
Extra dependencies:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
- ``cassandra-driver`` (for the Cassandra export module)
- ``chevron`` (for the action script feature)
- ``docker`` (for the Containers Docker monitoring support)
- ``elasticsearch`` (for the Elastic Search export module)
- ``FastAPI`` and ``Uvicorn`` (for Web server mode)
- ``graphitesender`` (For the Graphite export module)
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``kafka-python`` (for the Kafka export module)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
- ``podman`` (for the Containers Podman monitoring support)
- ``potsdb`` (for the OpenTSDB export module)
- ``prometheus_client`` (for the Prometheus export module)
- ``psycopg[binary]`` (for the PostgreSQL/TimeScale export module)
- ``pygal`` (for the graph export module)
- ``pymdstat`` (for RAID support) [Linux-only]
- ``pymongo`` (for the MongoDB export module)
- ``pysnmp-lextudio`` (for SNMP support)
- ``pySMART.smartx`` (for HDD Smart support) [Linux-only]
- ``pyzmq`` (for the ZeroMQ export module)
- ``requests`` (for the Ports, Cloud plugins and RESTful export module)
- ``sparklines`` (for the Quick Plugin sparklines option)
- ``statsd`` (for the StatsD export module)
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
- sponsor me using one-time or monthly tier Github sponsors_ page
- send me some pieces of bitcoin: 185KN9FCix3svJYp7JQM7hRMfSKyeaJR4X
- buy me a gift on my wishlist_ page
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License 📜
==========
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
.. _psutil: https://github.com/giampaolo/psutil
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://www.reddit.com/r/glances/
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions

View File

@ -1,18 +1,10 @@
.. raw:: html
<div align="center">
.. image:: ./docs/_static/glances-responsive-webdesign.png
.. raw:: html
<h1>Glances</h1>
An Eye on your System
===============================
Glances - An Eye on your System
===============================
| |pypi| |test| |contributors| |quality|
| |starts| |docker| |pypistat| |sponsors|
| |reddit|
| |starts| |docker| |pypistat| |ossrank|
| |sponsors| |twitter|
.. |pypi| image:: https://img.shields.io/pypi/v/glances.svg
:target: https://pypi.python.org/pypi/Glances
@ -29,6 +21,10 @@ An Eye on your System
:target: https://pepy.tech/project/glances
:alt: Pypi downloads
.. |ossrank| image:: https://shields.io/endpoint?url=https://ossrank.com/shield/3689
:target: https://ossrank.com/p/3689
:alt: OSSRank
.. |test| image:: https://github.com/nicolargo/glances/actions/workflows/ci.yml/badge.svg?branch=develop
:target: https://github.com/nicolargo/glances/actions
:alt: Linux tests (GitHub Actions)
@ -45,20 +41,12 @@ An Eye on your System
:target: https://github.com/sponsors/nicolargo
:alt: Sponsors
.. |twitter| image:: https://img.shields.io/badge/X-000000?style=for-the-badge&logo=x&logoColor=white
.. |twitter| image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
:alt: @nicolargo
.. |reddit| image:: https://img.shields.io/badge/Reddit-FF4500?style=for-the-badge&logo=reddit&logoColor=white
:target: https://www.reddit.com/r/glances/
:alt: @reddit
.. raw:: html
</div>
Summary 🌟
==========
Summary
=======
**Glances** is an open-source system cross-platform monitoring tool.
It allows real-time monitoring of various aspects of your system such as
@ -70,17 +58,21 @@ and can also be used for remote monitoring of systems via a web interface or com
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-summary.png
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-responsive-webdesign.png
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage 👋
========
Usage
=====
For the standalone mode, just run:
@ -88,8 +80,6 @@ For the standalone mode, just run:
$ glances
.. image:: ./docs/_static/glances-summary.png
For the Web server mode, run:
.. code-block:: console
@ -98,36 +88,28 @@ For the Web server mode, run:
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
In this mode, an HTTP/Restful API is exposed, see document `RestfulApi`_ for more details.
.. image:: ./docs/_static/screenshot-web.png
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
For the client/server mode, run:
.. code-block:: console
$ glances -s
and this one on the client:
on the server side and run:
.. code-block:: console
$ glances -c <ip>
on the client one.
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
network or defined in the configuration file:
.. code-block:: console
$ glances --browser
or WebUI:
.. code-block:: console
$ glances -w --browser
It is possible to display raw stats on stdout:
You can also display raw stats on stdout:
.. code-block:: console
@ -159,110 +141,56 @@ or in a JSON format thanks to the stdout-json option (attribute not supported in
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look of a machine:
and RTFM, always.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: ./docs/_static/screenshot-fetch.png
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu.get("total")
7.3
>>> gl.mem.get("used")
12498582144
>>> gl.auto_unit(gl.mem.get("used"))
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network.get("wlp0s20f3")
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
For a complete example of how to use Glances as a library, have a look to the `PythonApi`_.
Documentation 📜
================
Documentation
=============
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM! and the `FAQ`_), please post it on the official Reddit `forum`_ or in GitHub `Discussions`_.
If you have any question (after RTFM!), please post it on the official Q&A `forum`_.
Gateway to other services 🌐
============================
Gateway to other services
=========================
Glances can export stats to:
- files: ``CSV`` and ``JSON``
- databases: ``InfluxDB``, ``ElasticSearch``, ``PostgreSQL/TimeScale``, ``Cassandra``, ``CouchDB``, ``OpenTSDB``, ``Prometheus``, ``StatsD``, ``Riemann`` and ``Graphite``
- brokers: ``RabbitMQ/ActiveMQ``, ``NATS``, ``ZeroMQ`` and ``Kafka``
- others: ``RESTful`` endpoint
- ``CSV`` file
- ``JSON`` file
- ``InfluxDB`` server
- ``Cassandra`` server
- ``CouchDB`` server
- ``OpenTSDB`` server
- ``Prometheus`` server
- ``StatsD`` server
- ``ElasticSearch`` server
- ``PostgreSQL/TimeScale`` server
- ``RabbitMQ/ActiveMQ`` broker
- ``ZeroMQ`` broker
- ``Kafka`` broker
- ``Riemann`` server
- ``Graphite`` server
- ``RESTful`` endpoint
Installation 🚀
===============
Installation
============
There are several methods to test/install Glances on your system. Choose your weapon!
PyPI: Pip, the standard way
---------------------------
Glances is on ``PyPI``. By using PyPI, you will be using the latest stable version.
Glances is on ``PyPI``. By using PyPI, you will be using the latest
stable version.
To install Glances, simply use the ``pip`` command line in an virtual environment.
To install Glances, simply use the ``pip`` command line.
Warning: on modern Linux operating systems, you may have an externally-managed-environment
error message when you try to use ``pip``. In this case, go to the PipX section below.
.. code-block:: console
cd ~
python3 -m venv ~/.venv
source ~/.venv/bin/activate
pip install glances
pip install --user glances
*Note*: Python headers are required to install `psutil`_, a Glances
dependency. For example, on Debian/Ubuntu **the simplest** is
@ -271,18 +199,17 @@ the *python-dev* package and gcc (*python-devel* on Fedora/CentOS/RHEL).
For Windows, just install psutil from the binary installation file.
By default, Glances is installed **without** the Web interface dependencies.
To install it, use the following command:
.. code-block:: console
pip install 'glances[web]'
pip install --user 'glances[web]'
For a full installation (with all features, see features list below):
.. code-block:: console
pip install 'glances[all]'
pip install --user 'glances[all]'
Features list:
@ -307,18 +234,21 @@ To upgrade Glances to the latest version:
.. code-block:: console
pip install --upgrade glances
pip install --user --upgrade glances
The current develop branch is published to the test.pypi.org package index.
If you want to test the develop version (could be unstable), enter:
.. code-block:: console
pip install --user -i https://test.pypi.org/simple/ Glances
PyPI: PipX, the alternative way
-------------------------------
Install PipX on your system. For example on Ubuntu/Debian:
Install PipX on your system (apt install pipx on Ubuntu).
.. code-block:: console
sudo apt install pipx
Then install Glances (with all features):
Install Glances (with all features):
.. code-block:: console
@ -326,18 +256,18 @@ Then install Glances (with all features):
The glances script will be installed in the ~/.local/bin folder.
To upgrade Glances to the latest version:
.. code-block:: console
pipx upgrade glances
Docker: the cloudy way
----------------------
Glances Docker images are available. You can use it to monitor your
server and all your containers !
Get the Glances container:
.. code-block:: console
docker pull nicolargo/glances:latest-full
The following tags are available:
- *latest-full* for a full Alpine Glances image (latest release) with all dependencies
@ -382,32 +312,13 @@ Run the container in *Web server mode*:
For a full list of options, see the Glances `Docker`_ documentation page.
It is also possible to use a simple Docker compose file (see in ./docker-compose/docker-compose.yml):
.. code-block:: console
cd ./docker-compose
docker-compose up
It will start a Glances server with WebUI.
Brew: The missing package manager
---------------------------------
For Linux and Mac OS, it is also possible to install Glances with `Brew`_:
.. code-block:: console
brew install glances
GNU/Linux package
-----------------
`Glances` is available on many Linux distributions, so you should be
able to install it using your favorite package manager. Nevertheless,
I do not recommend it. Be aware that when you use this method the operating
system `package`_ for `Glances` may not be the latest version and only basic
plugins are enabled.
able to install it using your favorite package manager. Be aware that
when you use this method the operating system `package`_ for `Glances`
may not be the latest version and only basics plugins are enabled.
Note: The Debian package (and all other Debian-based distributions) do
not include anymore the JS static files used by the Web interface
@ -426,6 +337,7 @@ Check for Python version:
# python --version
Install the Glances package:
.. code-block:: console
@ -471,7 +383,11 @@ Windows
-------
Install `Python`_ for Windows (Python 3.4+ ship with pip) and
follow the Glances Pip install procedure.
then run the following command:
.. code-block:: console
$ pip install glances
Android
-------
@ -525,8 +441,8 @@ Ansible
A Glances ``Ansible`` role is available: https://galaxy.ansible.com/zaxos/glances-ansible-role/
Shell tab completion 🔍
=======================
Shell tab completion
====================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
@ -540,15 +456,15 @@ For example, on a Linux operating system with bash shell:
Following shells are supported: bash, zsh and tcsh.
Requirements 🧩
===============
Requirements
============
Glances is developed in Python. A minimal Python version 3.10 or higher
Glances is developed in Python. A minimal Python version 3.9 or higher
should be installed on your system.
*Note for Python 2 users*
Glances version 4 or higher do not support Python 2 (and Python 3 < 3.10).
Glances version 4 or higher do not support Python 2 (and Python 3 < 3.9).
Please use Glances version 3.4.x if you need Python 2 support.
Dependencies:
@ -558,9 +474,8 @@ Dependencies:
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
Extra dependencies:
Optional dependencies:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
@ -573,8 +488,9 @@ Extra dependencies:
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``jinja2`` (for templating, used under the hood by FastAPI)
- ``kafka-python`` (for the Kafka export module)
- ``nats-py`` (for the NATS export module)
- ``netifaces2`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
- ``pycouchdb`` (for the CouchDB export module)
- ``pika`` (for the RabbitMQ/ActiveMQ export module)
@ -594,8 +510,8 @@ Extra dependencies:
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
How to contribute ? 🤝
======================
How to contribute ?
===================
If you want to contribute to the Glances project, read this `wiki`_ page.
@ -604,8 +520,8 @@ There is also a chat dedicated to the Glances developers:
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/nicolargo/glances?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
Project sponsorship 🙌
======================
Project sponsorship
===================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
@ -616,21 +532,21 @@ or just say "thank you" by:
Any and all contributions are greatly appreciated.
Authors and Contributors 🔥
===========================
Author
======
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License 📜
==========
License
=======
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
More stars ! 🌟
===============
More stars !
============
Please give us a star on `GitHub`_ if you like this project.
@ -639,18 +555,13 @@ Please give us a star on `GitHub`_ if you like this project.
:alt: Star history
.. _psutil: https://github.com/giampaolo/psutil
.. _Brew: https://formulae.brew.sh/formula/glances
.. _Python: https://www.python.org/getit/
.. _Termux: https://play.google.com/store/apps/details?id=com.termux
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://www.reddit.com/r/glances/
.. _forum: https://groups.google.com/forum/?hl=en#!forum/glances-users
.. _wiki: https://github.com/nicolargo/glances/wiki/How-to-contribute-to-Glances-%3F
.. _package: https://repology.org/project/glances/versions
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _Docker: https://github.com/nicolargo/glances/blob/master/docs/docker.rst
.. _Docker: https://github.com/nicolargo/glances/blob/develop/docs/docker.rst
.. _GitHub: https://github.com/nicolargo/glances
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html
.. _FAQ: https://github.com/nicolargo/glances/blob/develop/docs/faq.rst
.. _Discussions: https://github.com/nicolargo/glances/discussions

View File

@ -1,237 +0,0 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
annotated-doc==0.0.4
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via
# elasticsearch
# starlette
batinfo==0.4.2 ; sys_platform == 'linux'
# via glances
bernhard==0.2.6
# via glances
cassandra-driver==3.29.3
# via glances
certifi==2025.11.12
# via
# elastic-transport
# influxdb-client
# influxdb3-python
# requests
cffi==2.0.0 ; implementation_name == 'pypy' or platform_python_implementation != 'PyPy'
# via
# cryptography
# pyzmq
chardet==5.2.0
# via pysmart
charset-normalizer==3.4.4
# via requests
chevron==0.14.0
# via glances
click==8.1.8
# via
# geomet
# uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
cryptography==46.0.3
# via pysnmpcrypto
defusedxml==0.7.1
# via glances
dnspython==2.8.0
# via pymongo
docker==7.1.0
# via glances
elastic-transport==9.2.1
# via elasticsearch
elasticsearch==9.2.1
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.128.0
# via glances
geomet==1.1.0
# via cassandra-driver
graphitesender==0.11.2
# via glances
h11==0.16.0
# via uvicorn
humanfriendly==10.0
# via pysmart
ibm-cloud-sdk-core==3.24.2
# via ibmcloudant
ibmcloudant==0.11.2
# via glances
idna==3.11
# via
# anyio
# requests
ifaddr==0.2.0
# via zeroconf
importlib-metadata==8.7.1
# via pygal
influxdb==5.3.2
# via glances
influxdb-client==1.49.0
# via glances
influxdb3-python==0.16.0
# via glances
jinja2==3.1.6
# via
# glances
# pysmi-lextudio
kafka-python==2.3.0
# via glances
markupsafe==3.0.3
# via jinja2
msgpack==1.1.2
# via influxdb
nats-py==2.12.0
# via glances
nvidia-ml-py==13.590.44
# via glances
packaging==25.0
# via glances
paho-mqtt==2.1.0
# via glances
pbkdf2==1.3
# via wifi
pika==1.3.2
# via glances
ply==3.11
# via pysmi-lextudio
podman==5.6.0
# via glances
potsdb==1.0.3
# via glances
prometheus-client==0.23.1
# via glances
protobuf==6.33.2
# via bernhard
psutil==7.2.1
# via glances
psycopg==3.3.2
# via glances
psycopg-binary==3.3.2 ; implementation_name != 'pypy'
# via psycopg
pyarrow==22.0.0
# via influxdb3-python
pyasn1==0.6.1
# via pysnmp-lextudio
pycparser==2.23 ; (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy') or (implementation_name == 'pypy' and platform_python_implementation == 'PyPy')
# via cffi
pydantic==2.12.5
# via fastapi
pydantic-core==2.41.5
# via pydantic
pygal==3.1.0
# via glances
pyjwt==2.10.1
# via
# ibm-cloud-sdk-core
# ibmcloudant
pymdstat==0.4.3
# via glances
pymongo==4.15.5
# via glances
pyreadline3==3.5.4 ; sys_platform == 'win32'
# via humanfriendly
pysmart==1.4.2
# via glances
pysmi-lextudio==1.4.3
# via pysnmp-lextudio
pysnmp-lextudio==6.1.2
# via glances
pysnmpcrypto==0.0.4
# via pysnmp-lextudio
python-dateutil==2.9.0.post0
# via
# elasticsearch
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# influxdb-client
# influxdb3-python
pytz==2025.2
# via influxdb
pywin32==311 ; sys_platform == 'win32'
# via docker
pyzmq==27.1.0
# via glances
reactivex==4.1.0
# via
# influxdb-client
# influxdb3-python
requests==2.32.5
# via
# docker
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# podman
# pysmi-lextudio
setuptools==80.9.0
# via
# influxdb-client
# wifi
shtab==1.8.0 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# influxdb
# python-dateutil
sniffio==1.3.1
# via
# elastic-transport
# elasticsearch
sparklines==0.7.0
# via glances
starlette==0.50.0
# via fastapi
statsd==4.0.1
# via glances
termcolor==3.3.0
# via sparklines
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# cryptography
# elasticsearch
# fastapi
# psycopg
# pydantic
# pydantic-core
# reactivex
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
tzdata==2025.3 ; sys_platform == 'win32'
# via psycopg
urllib3==2.6.2
# via
# docker
# elastic-transport
# ibm-cloud-sdk-core
# influxdb-client
# influxdb3-python
# podman
# requests
uvicorn==0.40.0
# via glances
wifi==0.3.8
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
zeroconf==0.148.0
# via glances
zipp==3.23.0
# via importlib-metadata

View File

@ -1,9 +0,0 @@
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}

View File

@ -1,23 +0,0 @@
  _____ _
 / ____| |
| |  __| | __ _ _ __   ___ ___  ___
| | |_ | |/ _` | '_ \ / __/ _ \/ __|
| |__| | | (_| | | | | (_|  __/\__ \
 \_____|_|\__,_|_| |_|\___\___||___/
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -49,7 +49,7 @@ history_size=1200
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -64,10 +64,6 @@ history_size=1200
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile_password=kfp
#ssl_keyfile=./glances.local+3-key.pem
#ssl_certfile=./glances.local+3.pem
##############################################################################
# Plugins
@ -131,7 +127,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -185,14 +181,12 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}" >> /tmp/memory.alert
[memswap]
disable=False
@ -201,7 +195,6 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}" > /tmp/memory.alert
[load]
disable=False
@ -248,9 +241,8 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp0s20f3:WIFI
#alias=wlp2s0:WIFI
[ip]
# Disable display of private IP address
@ -308,32 +300,15 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There is no default bitrate thresholds for disk (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=False
#dm-0_rx_log=True
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=False
#dm-0_tx_log=True
[fs]
disable=False
@ -343,19 +318,15 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
#alias=/:Root,/zfspool:ZFS
#alias=/:Root,/zsfpool:ZSF
[irq]
# Documentation: https://glances.readthedocs.io/en/latest/aoa/irq.html
@ -402,8 +373,6 @@ disable=True
#hide=.*Hide_this_driver.*
# Define the list of sensors to show (comma-separated regexp)
#show=.*Drive_Temperature.*
# List of attributes to hide (comma separated)
#hide_attributes=Self-tests,Errors
[hddtemp]
disable=False
@ -415,8 +384,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -424,11 +393,10 @@ hide=unknown.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
#temperature_core_Ambient_careful=45
#temperature_core_Ambient_warning=65
#temperature_core_Ambient_critical=80
#temperature_core_Ambient_log=False
# Overwrite thresholds for a specific type of sensor
#temperature_core_careful=45
#temperature_core_warning=65
@ -463,8 +431,6 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disable: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -493,8 +459,6 @@ status_critical=Z,D
# Define the list of processes to export using:
# a comma-separated list of Glances filter
#export=.*firefox.*,pid:1234
# Define a list of process to focus on (comma-separated list of Glances filter)
#focus=.*firefox.*,.*python.*
[ports]
disable=False
@ -562,8 +526,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -641,11 +605,6 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created
@ -892,14 +851,6 @@ password=password
# Most of the time, you should not overwrite this value
#hostname=mycomputer
[nats]
# Configuration for the --export nats option
# https://nats.io/
# Host is a separated list of NATS nodes
host=nats://localhost:4222
# Prefix for the subjects (default is 'glances')
prefix=glances
##############################################################################
# AMPS
# * enable: Enable (true) or disable (false) the AMP

View File

@ -1,395 +1,20 @@
# This file was autogenerated by uv via the following command:
# uv export --no-hashes --only-dev --output-file dev-requirements.txt
alabaster==1.0.0
# via sphinx
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via
# httpx
# mcp
# sse-starlette
# starlette
attrs==25.4.0
# via
# glom
# jsonschema
# outcome
# referencing
# reuse
# semgrep
# trio
babel==2.17.0
# via sphinx
boltons==21.0.0
# via
# face
# glom
# semgrep
boolean-py==5.0
# via license-expression
bracex==2.6
# via wcmatch
certifi==2025.11.12
# via
# httpcore
# httpx
# requests
# selenium
cffi==2.0.0 ; (implementation_name != 'pypy' and os_name == 'nt') or platform_python_implementation != 'PyPy'
# via
# cryptography
# trio
cfgv==3.5.0
# via pre-commit
charset-normalizer==3.4.4
# via
# python-debian
# requests
click==8.1.8
# via
# click-option-group
# reuse
# semgrep
# typer
# uvicorn
click-option-group==0.5.9
# via semgrep
codespell==2.4.1
colorama==0.4.6
# via
# click
# pytest
# semgrep
# sphinx
contourpy==1.3.2 ; python_full_version < '3.11'
# via matplotlib
contourpy==1.3.3 ; python_full_version >= '3.11'
# via matplotlib
cryptography==46.0.3
# via pyjwt
cycler==0.12.1
# via matplotlib
distlib==0.4.0
# via virtualenv
docutils==0.21.2
# via
# rstcheck-core
# sphinx
# sphinx-rtd-theme
exceptiongroup==1.2.2
# via
# anyio
# pytest
# semgrep
# trio
# trio-websocket
face==24.0.0
# via glom
filelock==3.20.2
# via virtualenv
fonttools==4.61.1
# via matplotlib
glom==22.1.0
# via semgrep
googleapis-common-protos==1.72.0
# via opentelemetry-exporter-otlp-proto-http
gprof2dot==2025.4.14
h11==0.16.0
# via
# httpcore
# uvicorn
# wsproto
httpcore==1.0.9
# via httpx
httpx==0.28.1
# via mcp
httpx-sse==0.4.3
# via mcp
identify==2.6.15
# via pre-commit
idna==3.11
# via
# anyio
# httpx
# requests
# trio
imagesize==1.4.1
# via sphinx
importlib-metadata==8.7.1
# via opentelemetry-api
iniconfig==2.3.0
# via pytest
jinja2==3.1.6
# via
# reuse
# sphinx
jsonschema==4.25.1
# via
# mcp
# semgrep
jsonschema-specifications==2025.9.1
# via jsonschema
kiwisolver==1.4.9
# via matplotlib
license-expression==30.4.4
# via reuse
markdown-it-py==4.0.0
# via rich
markupsafe==3.0.3
# via jinja2
matplotlib==3.10.8
mcp==1.23.3
# via semgrep
mdurl==0.1.2
# via markdown-it-py
memory-profiler==0.61.0
nodeenv==1.10.0
# via
# pre-commit
# pyright
numpy==2.2.6 ; python_full_version < '3.11'
# via
# contourpy
# matplotlib
numpy==2.4.0 ; python_full_version >= '3.11'
# via
# contourpy
# matplotlib
opentelemetry-api==1.37.0
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-exporter-otlp-proto-common==1.37.0
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.37.0
# via semgrep
opentelemetry-instrumentation==0.58b0
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation-requests==0.58b0
# via semgrep
opentelemetry-proto==1.37.0
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.37.0
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-semantic-conventions==0.58b0
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-util-http==0.58b0
# via opentelemetry-instrumentation-requests
outcome==1.3.0.post0
# via
# trio
# trio-websocket
packaging==25.0
# via
# matplotlib
# opentelemetry-instrumentation
# pytest
# requirements-parser
# semgrep
# sphinx
# webdriver-manager
peewee==3.18.3
# via semgrep
pillow==12.1.0
# via matplotlib
platformdirs==4.5.1
# via virtualenv
pluggy==1.6.0
# via pytest
pre-commit==4.5.1
protobuf==6.33.2
# via
# googleapis-common-protos
# opentelemetry-proto
psutil==7.2.1
# via memory-profiler
py-spy==0.4.1
pycparser==2.23 ; (implementation_name != 'PyPy' and implementation_name != 'pypy' and os_name == 'nt') or (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy')
# via cffi
pydantic==2.12.5
# via
# mcp
# pydantic-settings
# rstcheck-core
pydantic-core==2.41.5
# via pydantic
pydantic-settings==2.12.0
# via mcp
pygments==2.19.2
# via
# pytest
# rich
# sphinx
pyinstrument==5.1.1
pyjwt==2.10.1
# via mcp
pyparsing==3.3.1
# via matplotlib
pyright==1.1.407
pysocks==1.7.1
# via urllib3
pytest==9.0.2
python-dateutil==2.9.0.post0
# via matplotlib
python-debian==1.0.1
# via reuse
python-dotenv==1.2.1
# via
# pydantic-settings
# webdriver-manager
python-magic==0.4.27
# via reuse
python-multipart==0.0.21
# via mcp
pywin32==311 ; sys_platform == 'win32'
# via
# mcp
# semgrep
pyyaml==6.0.3
# via pre-commit
referencing==0.37.0
# via
# jsonschema
# jsonschema-specifications
requests==2.32.5
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
# sphinx
# webdriver-manager
requirements-parser==0.13.0
reuse==6.2.0
rich==13.5.3
# via
# semgrep
# typer
roman-numerals==4.1.0 ; python_full_version >= '3.11'
# via roman-numerals-py
roman-numerals-py==4.1.0 ; python_full_version >= '3.11'
# via sphinx
rpds-py==0.30.0
# via
# jsonschema
# referencing
rstcheck==6.2.5
rstcheck-core==1.2.2
# via rstcheck
ruamel-yaml==0.19.1
# via semgrep
ruamel-yaml-clib==0.2.14
# via semgrep
ruff==0.14.10
selenium==4.39.0
semgrep==1.146.0
setuptools==80.9.0
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via trio
snowballstemmer==3.0.1
# via sphinx
sortedcontainers==2.4.0
# via trio
sphinx==8.1.3 ; python_full_version < '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.2.3 ; python_full_version >= '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sse-starlette==3.1.2
# via mcp
starlette==0.50.0
# via
# mcp
# sse-starlette
tomli==2.0.2
# via
# pytest
# semgrep
# sphinx
tomlkit==0.13.3
# via reuse
trio==0.32.0
# via
# selenium
# trio-websocket
trio-websocket==0.12.2
# via selenium
typer==0.21.0
# via rstcheck
typing-extensions==4.15.0
# via
# anyio
# cryptography
# mcp
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pyright
# referencing
# selenium
# semgrep
# starlette
# typer
# typing-inspection
# uvicorn
# virtualenv
typing-inspection==0.4.2
# via
# mcp
# pydantic
# pydantic-settings
urllib3==2.6.2
# via
# requests
# selenium
# semgrep
uvicorn==0.40.0 ; sys_platform != 'emscripten'
# via mcp
virtualenv==20.35.4
# via pre-commit
wcmatch==8.5.2
# via semgrep
webdriver-manager==4.0.2
websocket-client==1.9.0
# via selenium
wrapt==1.17.3
# via opentelemetry-instrumentation
wsproto==1.3.2
# via trio-websocket
zipp==3.23.0
# via importlib-metadata
codespell
coverage
fonttools>=4.43.0 # not directly required, pinned by Snyk to avoid a vulnerability
gprof2dot
matplotlib
memory-profiler
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
pre-commit
py-spy
pyright
pytest
requirements-parser
rstcheck
ruff
selenium
semgrep; platform_system == 'Linux'
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
webdriver-manager
h11>=0.16.0 # not directly required, pinned by Snyk to avoid a vulnerability

7
doc-requirements.txt Normal file
View File

@ -0,0 +1,7 @@
psutil
defusedxml
orjson
reuse
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
sphinx
sphinx_rtd_theme

View File

@ -0,0 +1,3 @@
FROM glances:local-alpine-minimal as glancesminimal
COPY glances.conf /glances/conf/glances.conf
CMD python -m glances -C /glances/conf/glances.conf $GLANCES_OPT

View File

@ -0,0 +1,40 @@
version: "3.9"
services:
reverse-proxy:
image: traefik
command: --api --docker
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
whoami:
image: emilevauge/whoami
labels:
- "traefik.frontend.rule=Host:whoami.docker.localhost"
monitoring:
image: nicolargo/glances:dev
restart: unless-stopped
pid: host
privileged: true
network_mode: "host"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
environment:
- TZ=${TZ}
- "GLANCES_OPT=-C /glances/conf/glances.conf -w"
# Uncomment for GPU compatibility (Nvidia) inside the container
# deploy:
# resources:
# reservations:
# devices:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
labels:
- "traefik.port=61208"
- "traefik.frontend.rule=Host:glances.docker.localhost"

View File

@ -1,41 +1,20 @@
version: '3.9'
services:
glances:
# See all images tags here: https://hub.docker.com/r/nicolargo/glances/tags
image: nicolargo/glances:latest-full
build:
context: ./
dockerfile: Dockerfile
restart: always
pid: "host"
privileged: true
network_mode: "host"
read_only: true
privileged: false
# Uncomment next line for SATA or NVME smartctl monitoring
# cap_add:
# Uncomment next line for SATA smartctl monitoring
# - SYS_RAWIO
# Uncomment next line for NVME smartctl monitoring
# - SYS_ADMIN
# devices:
# - "/dev/nvme0"
volumes:
- "/:/rootfs:ro"
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
# Uncomment for proper distro information in upper panel.
# # Works only for distros that do have this file (most of distros do).
# - "/etc/os-release:/etc/os-release:ro"
tmpfs:
- /tmp
environment:
# Please set to your local timezone (or use local ${TZ} environment variable if set on your host)
- TZ=Europe/Paris
- GLANCES_OPT=-C /glances/conf/glances.conf -w --enable-plugin smart
- PYTHONPYCACHEPREFIX=/tmp/py_caches
- TZ=${TZ}
- GLANCES_OPT=-C /glances/conf/glances.conf -w
# # Uncomment for GPU compatibility (Nvidia) inside the container
# deploy:
# resources:
@ -44,11 +23,10 @@ services:
# - driver: nvidia
# count: 1
# capabilities: [gpu]
# Uncomment to protect Glances WebUI by a login/password (add --password to GLANCES_OPT)
# secrets:
# - source: glances_password
# target: /root/.config/glances/<login>.pwd
# # Uncomment to protect Glances WebUI by a login/password (add --password to GLANCES_OPT)
# secrets:
# - source: glances_password
# target: /root/.config/glances/<login>.pwd
# secrets:
# glances_password:

90
docker-compose/glances.conf Normal file → Executable file
View File

@ -13,7 +13,7 @@ check_update=False
# Default is 1200 values (~1h with the default refresh rate)
history_size=1200
# Set the way Glances should display the date (default is %Y-%m-%d %H:%M:%S %Z)
#strftime_format=%Y-%m-%d %H:%M:%S %Z
# strftime_format=%Y-%m-%d %H:%M:%S %Z
# Define external directory for loading additional plugins
# The layout follows the glances standard for plugin definitions
#plugin_dir=/home/user/dev/plugins
@ -49,7 +49,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -64,10 +64,6 @@ max_processes_display=25
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile_password=kfp
#ssl_keyfile=./glances.local+3-key.pem
#ssl_certfile=./glances.local+3.pem
##############################################################################
# Plugins
@ -131,7 +127,7 @@ user_careful=50
user_warning=70
user_critical=90
user_log=False
#user_critical_action=echo "{{time}} User CPU {{user}} higher than {{critical}}" > /tmp/cpu.alert
#user_critical_action=echo {{user}} {{value}} {{max}} > /tmp/cpu.alert
#
system_careful=50
system_warning=70
@ -185,14 +181,12 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
#careful_action_repeat=echo {{percent}} >> /tmp/memory.alert
warning=70
critical=90
#critical_action_repeat=echo "{{time}} {{percent}} higher than {{critical}}" >> /tmp/memory.alert
[memswap]
disable=False
@ -201,7 +195,6 @@ disable=False
careful=50
warning=70
critical=90
#warning_action=echo "{{time}} {{percent}} higher than {{warning}}" > /tmp/memory.alert
[load]
disable=False
@ -248,9 +241,8 @@ hide_zero=False
#wlan0_tx_warning=900000
#wlan0_tx_critical=1000000
#wlan0_tx_log=True
#wlan0_rx_critical_action=echo "{{time}} {{interface_name}} RX {{bytes_recv_rate_per_sec}}Bps" > /tmp/network.alert
# Alias for network interface name
#alias=wlp0s20f3:WIFI
#alias=wlp2s0:WIFI
[ip]
# Disable display of private IP address
@ -308,32 +300,15 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There is no default bitrate thresholds for disk (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=False
#dm-0_rx_log=True
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=False
#dm-0_tx_log=True
[fs]
disable=False
@ -343,15 +318,11 @@ hide=/boot.*,.*/snap.*
#show=/,/srv
# Define filesystem space thresholds in %
# Default values if not defined: 50/70/90
# It is also possible to define per mount point value
# Example: /_careful=40
careful=50
warning=70
critical=90
# It is also possible to define per mount point value
# Example: /_careful=40
#/_careful=1
#/_warning=5
#/_critical=10
#/_critical_action=echo "{{time}} {{mnt_point}} filesystem space {{percent}}% higher than {{critical}}%" > /tmp/fs.alert
# Allow additional file system types (comma-separated FS type)
#allow=shm
# Alias for root file system
@ -402,8 +373,6 @@ disable=True
#hide=.*Hide_this_driver.*
# Define the list of sensors to show (comma-separated regexp)
#show=.*Drive_Temperature.*
# List of attributes to hide (comma separated)
#hide_attributes=Self-tests,Errors
[hddtemp]
disable=False
@ -415,8 +384,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -424,11 +393,10 @@ hide=unknown.*
# Sensors core thresholds (in Celsius...)
# By default values are grabbed from the system
# Overwrite thresholds for a specific sensor
# temperature_core_Ambient_careful=40
# temperature_core_Ambient_warning=60
# temperature_core_Ambient_critical=85
# temperature_core_Ambient_log=True
# temperature_core_Ambient_critical_action=echo "{{time}} {{label}} temperature {{value}}{{unit}} higher than {{critical}}{{unit}}" > /tmp/temperature.alert
#temperature_core_Ambient_careful=45
#temperature_core_Ambient_warning=65
#temperature_core_Ambient_critical=80
#temperature_core_Ambient_log=False
# Overwrite thresholds for a specific type of sensor
#temperature_core_careful=45
#temperature_core_warning=65
@ -463,8 +431,6 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disable: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -493,8 +459,6 @@ status_critical=Z,D
# Define the list of processes to export using:
# a comma-separated list of Glances filter
#export=.*firefox.*,pid:1234
# Define a list of process to focus on (comma-separated list of Glances filter)
#focus=.*firefox.*,.*python.*
[ports]
disable=False
@ -546,8 +510,7 @@ port_default_gateway=False
disable=True
# Define the maximum VMs size name (default is 20 chars)
max_name_size=20
# By default, Glances only display running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# By default, Glances only display running VMs with states: 'Running', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regarding their states
all=False
@ -562,8 +525,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -641,11 +604,6 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created
@ -664,7 +622,7 @@ style=DarkStyle
[influxdb]
# !!!
# Will be DEPRECATED in future release.
# Please have a look on the new influxdb3 export module
# Please have a look on the new influxdb2 export module (compatible with InfluxDB 1.8.x and 2.x)
# !!!
# Configuration for the --export influxdb option
# https://influxdb.com/
@ -892,14 +850,6 @@ password=password
# Most of the time, you should not overwrite this value
#hostname=mycomputer
[nats]
# Configuration for the --export nats option
# https://nats.io/
# Host is a separated list of NATS nodes
host=nats://localhost:4222
# Prefix for the subjects (default is 'glances')
prefix=glances
##############################################################################
# AMPS
# * enable: Enable (true) or disable (false) the AMP

View File

@ -9,7 +9,7 @@
# WARNING: the Alpine image version and Python version should be set.
# Alpine 3.18 tag is a link to the latest 3.18.x version.
# Be aware that if you change the Alpine version, you may have to change the Python version.
ARG IMAGE_VERSION=3.23
ARG IMAGE_VERSION=3.22
ARG PYTHON_VERSION=3.12
##############################################################################
@ -61,9 +61,12 @@ RUN apk add --no-cache \
RUN python${PYTHON_VERSION} -m venv venv-build
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv venv-build
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv --without-pip venv
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -71,7 +74,9 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r docker-requirements.txt
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -84,7 +89,8 @@ ARG CASS_DRIVER_NO_CYTHON=1
ARG CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r all-requirements.txt
-r requirements.txt \
-r optional-requirements.txt
##############################################################################
# RELEASE Stages
@ -102,18 +108,17 @@ COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# Copy binary and update PATH
COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# EXPOSE PORT (XMLRPC / WebUI)
EXPOSE 61209 61208
# Add glances user
# RUN addgroup -g 1000 glances && \
# adduser -D -u 1000 -G glances glances && \
# chown -R glances:glances /app
# Define default command.
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD ["/bin/sh", "-c", "/venv/bin/python3 -m glances ${GLANCES_OPT}"]
################################################################################
# RELEASE: minimal
@ -121,8 +126,6 @@ FROM release AS minimal
COPY --from=buildminimal /venv /venv
# USER glances
################################################################################
# RELEASE: full
FROM release AS full
@ -131,8 +134,6 @@ RUN apk add --no-cache libzmq
COPY --from=buildfull /venv /venv
# USER glances
################################################################################
# RELEASE: dev - to be compatible with CI
FROM full AS dev
@ -142,8 +143,5 @@ FROM full AS dev
COPY ./docker-files/docker-logger.json /app
ENV LOG_CFG=/app/docker-logger.json
# USER glances
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD ["/bin/sh", "-c", "/venv/bin/python3 -m glances ${GLANCES_OPT}"]

View File

@ -1,24 +1,22 @@
{
"version": 1,
"disable_existing_loggers": "False",
"root": { "level": "INFO", "handlers": ["console"] },
"formatters": {
"standard": { "format": "%(asctime)s -- %(levelname)s -- %(message)s" },
"short": { "format": "%(levelname)s -- %(message)s" },
"long": {
"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"
},
"free": { "format": "%(message)s" }
},
"handlers": {
"console": { "class": "logging.StreamHandler", "formatter": "standard" }
},
"loggers": {
"debug": { "handlers": ["console"], "level": "DEBUG" },
"verbose": { "handlers": ["console"], "level": "INFO" },
"standard": { "handlers": ["console"], "level": "INFO" },
"requests": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch": { "handlers": ["console"], "level": "ERROR" },
"elasticsearch.trace": { "handlers": ["console"], "level": "ERROR" }
}
}
"version": 1,
"disable_existing_loggers": "False",
"root": {"level": "INFO", "handlers": ["console"]},
"formatters": {
"standard": {"format": "%(asctime)s -- %(levelname)s -- %(message)s"},
"short": {"format": "%(levelname)s -- %(message)s"},
"long": {"format": "%(asctime)s -- %(levelname)s -- %(message)s (%(funcName)s in %(filename)s)"},
"free": {"format": "%(message)s"}
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "standard"}
},
"loggers": {
"debug": {"handlers": ["console"], "level": "DEBUG"},
"verbose": {"handlers": ["console"], "level": "INFO"},
"standard": {"handlers": ["console"], "level": "INFO"},
"requests": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch": {"handlers": ["console"], "level": "ERROR"},
"elasticsearch.trace": {"handlers": ["console"], "level": "ERROR"}
}
}

View File

@ -55,7 +55,7 @@ RUN apt-get clean \
RUN python3 -m venv --without-pip venv
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -63,7 +63,9 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r docker-requirements.txt
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -71,7 +73,8 @@ FROM build AS buildfull
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r all-requirements.txt
-r requirements.txt \
-r optional-requirements.txt
##############################################################################
# RELEASE Stages
@ -89,21 +92,17 @@ COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# Copy binary and update PATH
COPY docker-bin.sh /usr/local/bin/glances
RUN chmod a+x /usr/local/bin/glances
ENV PATH="/venv/bin:$PATH"
# EXPOSE PORT (XMLRPC / WebUI)
EXPOSE 61209 61208
# Add glances user
# NOTE: If used, the Glances Docker plugin do not work...
# UID and GUID 1000 are already configured for the ubuntu user
# Create anew one with UID and GUID 1001
# RUN groupadd -g 1001 glances && \
# useradd -u 1001 -g glances glances && \
# chown -R glances:glances /app
# Define default command.
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD ["/bin/sh", "-c", "/venv/bin/python3 -m glances ${GLANCES_OPT}"]
################################################################################
# RELEASE: minimal
@ -112,8 +111,6 @@ ARG PYTHON_VERSION
COPY --from=buildMinimal /venv /venv
# USER glances
################################################################################
# RELEASE: full
FROM release AS full
@ -126,8 +123,6 @@ RUN apt-get update \
COPY --from=buildfull /venv /venv
# USER glances
################################################################################
# RELEASE: dev - to be compatible with CI
FROM full AS dev
@ -138,8 +133,5 @@ ARG PYTHON_VERSION
COPY ./docker-files/docker-logger.json /app
ENV LOG_CFG=/app/docker-logger.json
# USER glances
WORKDIR /app
ENV PYTHON_VERSION=${PYTHON_VERSION}
CMD ["/bin/sh", "-c", "/venv/bin/python${PYTHON_VERSION} -m glances ${GLANCES_OPT}"]
CMD ["/bin/sh", "-c", "/venv/bin/python3 -m glances ${GLANCES_OPT}"]

View File

@ -1,83 +1,10 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
annotated-doc==0.0.4
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.12.0
# via starlette
certifi==2025.11.12
# via requests
charset-normalizer==3.4.4
# via requests
click==8.1.8
# via uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
defusedxml==0.7.1
# via glances
docker==7.1.0
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.128.0
# via glances
h11==0.16.0
# via uvicorn
idna==3.11
# via
# anyio
# requests
jinja2==3.1.6
# via glances
markupsafe==3.0.3
# via jinja2
packaging==25.0
# via glances
podman==5.6.0
# via glances
psutil==7.2.1
# via glances
pydantic==2.12.5
# via fastapi
pydantic-core==2.41.5
# via pydantic
python-dateutil==2.9.0.post0
# via glances
pywin32==311 ; sys_platform == 'win32'
# via docker
requests==2.32.5
# via
# docker
# glances
# podman
shtab==1.8.0 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# python-dateutil
starlette==0.50.0
# via fastapi
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# fastapi
# pydantic
# pydantic-core
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
urllib3==2.6.2
# via
# docker
# podman
# requests
uvicorn==0.40.0
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
# install with base requirements file
-r requirements.txt
docker>=6.1.1
orjson # JSON Serialization speedup
podman
python-dateutil
requests
six
urllib3

View File

@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = ../.venv/bin/sphinx-build
SPHINXBUILD = ../venv/bin/sphinx-build
PAPER =
BUILDDIR = _build

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 203 KiB

After

Width:  |  Height:  |  Size: 256 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

After

Width:  |  Height:  |  Size: 47 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 33 KiB

After

Width:  |  Height:  |  Size: 51 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 83 KiB

View File

@ -3,7 +3,7 @@
Actions
=======
Glances can trigger actions on events for warning and critical thresholds.
Glances can trigger actions on events.
By ``action``, we mean all shell command line. For example, if you want
to execute the ``foo.py`` script if the last 5 minutes load are critical
@ -18,13 +18,6 @@ then add the ``_action`` line to the Glances configuration file:
All the stats are available in the command line through the use of the
`Mustache`_ syntax. `Chevron`_ is required to render the mustache's template syntax.
Additionaly to the stats of the current plugin, the following variables are
also available:
- ``{{time}}``: current time in ISO format
- ``{{critical}}``: critical threshold value
- ``{{warning}}``: warning threshold value
- ``{{careful}}``: careful threshold value
Another example would be to create a log file
containing used vs total disk space if a space trigger warning is
reached:
@ -33,7 +26,7 @@ reached:
[fs]
warning=70
warning_action=echo "{{time}} {{mnt_point}} {{used}}/{{size}}" > /tmp/fs.alert
warning_action=echo {{mnt_point}} {{used}}/{{size}} > /tmp/fs.alert
A last example would be to create a log file containing the total user disk
space usage for a device and notify by email each time a space trigger
@ -43,11 +36,13 @@ critical is reached:
[fs]
critical=90
critical_action_repeat=echo "{{time}} {{device_name}} {{percent}}" > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
critical_action_repeat=echo {{device_name}} {{percent}} > /tmp/fs.alert && python /etc/glances/actions.d/fs-critical.py
.. note::
Use && as separator for multiple commands
Within ``/etc/glances/actions.d/fs-critical.py``:
.. code-block:: python
@ -68,7 +63,7 @@ Within ``/etc/glances/actions.d/fs-critical.py``:
.. note::
You can use all the stats for the current plugin. See
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
for the stats list.
It is also possible to repeat action until the end of the alert.

View File

@ -32,8 +32,8 @@ under the ``[containers]`` section:
# Define the maximum containers size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
disable_stats=diskio,networkio
# Global containers' thresholds for CPU and MEM (in %)
cpu_careful=50
cpu_warning=70

View File

@ -5,16 +5,21 @@ Disk I/O
.. image:: ../_static/diskio.png
Glances displays the disk I/O throughput, count and mean latency:
Glances displays the disk I/O throughput. The unit is adapted
dynamically.
You can display:
- bytes per second (default behavior / Bytes/s, KBytes/s, MBytes/s, etc)
- requests per second (using --diskio-iops option or *B* hotkey)
- mean latency (using --diskio-latency option or *L* hotkey)
It's also possible to define:
There is no alert on this information.
It's possible to define:
- a list of disk to show (white list)
- a list of disks to hide
- aliases for disk name (use \ to espace special characters)
- aliases for disk name
under the ``[diskio]`` section in the configuration file.
@ -37,20 +42,13 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
It is also possible to define thesholds for latency and bytes read and write per second:
It is also possible to define thesholds for bytes read and write per second:
.. code-block:: ini
[diskio]
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
dm-0_rx_careful=4000000000
dm-0_rx_warning=5000000000

View File

@ -35,11 +35,6 @@ system:
[fs]
allow=shm
With the above configuration key, it is also possible to monitor NFS
mount points (allow=nfs). Be aware that this can slow down the
performance of the plugin if the NFS server is not reachable. In this
case, the plugin will wait for a 2 seconds timeout.
Also, you can hide mount points using regular expressions.
To hide all mount points starting with /boot and /snap:

View File

@ -27,7 +27,7 @@ Stats description:
is in RAM.
- **inactive**: (UNIX): memory that is marked as not used.
- **buffers**: (Linux, BSD): cache for things like file system metadata.
- **cached**: (Linux, BSD): cache for various things (including ZFS cache).
- **cached**: (Linux, BSD): cache for various things.
Additional stats available in through the API:
@ -41,10 +41,6 @@ Additional stats available in through the API:
- **shared**: (BSD): memory that may be simultaneously accessed by multiple
processes.
It is possible to display the available memory instead of the used memory
by setting the ``available`` option to ``True`` in the configuration file
under the ``[mem]`` section.
A character is also displayed just after the MEM header and shows the
trend value:

View File

@ -20,7 +20,7 @@ Additionally, you can define:
- automatically hide interfaces not up
- automatically hide interfaces without IP address
- per-interface limit values
- aliases for interface name (use \ to espace special characters)
- aliases for interface name
The configuration should be done in the ``[network]`` section of the
Glances configuration file.
@ -72,7 +72,7 @@ can also be used to set a threshold higher than zero.
.. code-block:: ini
[network]
[diskio]
hide_zero=True
hide_threshold_bytes=0

View File

@ -149,24 +149,12 @@ Columns display
pressing on the ``'/'`` key
========================= ==============================================
Disable display of virtual memory
---------------------------------
It's possible to disable the display of the VIRT column (virtual memory) by adding the
``disable_virtual_memory=True`` option in the ``[processlist]`` section of the configuration
file (glances.conf):
.. code-block:: ini
[processlist]
disable_virtual_memory=True
Process filtering
-----------------
It's possible to filter the processes list using the ``ENTER`` key.
Glances filter syntax is the following (examples):
Filter syntax is the following (examples):
- ``python``: Filter processes name or command line starting with
*python* (regexp)
@ -175,25 +163,6 @@ Glances filter syntax is the following (examples):
- ``username:nicolargo``: Processes of nicolargo user (key:regexp)
- ``cmdline:\/usr\/bin.*``: Processes starting by */usr/bin*
Process focus
-------------
It's also possible to select a processes list to focus on.
A list of Glances filters (see upper) can be define from the command line:
.. code-block:: bash
glances --process-focus .*python.*,.*firefox.*
or the glances.conf file:
.. code-block:: ini
[processlist]
focus=.*python.*,.*firefox.*
Extended info
-------------

View File

@ -33,7 +33,6 @@ thresholds (default behavor).
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
#alias=temp1:Motherboard 0,core 0:CPU Core 0
.. note 1::
The support for multiple batteries is only available if

View File

@ -44,11 +44,4 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
.. _regex101: https://regex101.com/
You can also hide attributes, for example Self-tests, Errors, etc. Use a comma separated list.
.. code-block:: ini
[smart]
hide_attributes=attribute_name1,attribute_name2
.. _regex101: https://regex101.com/

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,2 @@
#!/bin/sh
make clean
make html
LC_ALL=C make man

View File

@ -416,17 +416,11 @@ The following commands (key pressed) are supported while in Glances:
``F5`` or ``CTRL-R``
Refresh user interface
``SHIFT-LEFT``
``LEFT``
Navigation left through the process sort
``SHIFT-RIGHT``
Navigation right through the process sort
``LEFT``
Navigation left through the process name
``RIGHT``
Navigation right through the process name
Navigation right through the process sort
``UP``
Up in the processes list

View File

@ -80,7 +80,7 @@ than a second one concerning the user interface:
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -95,10 +95,6 @@ than a second one concerning the user interface:
# Comma separated list of HTTP request headers that should be supported for cross-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
Each plugin, export module, and application monitoring process (AMP) can
have a section. Below is an example for the CPU plugin:

View File

@ -187,7 +187,7 @@ and make it visible to your container by adding it to ``docker-compose.yml`` as
image: nicolargo/glances:latest
restart: always
environment:
- "GLANCES_OPT=-w --password"
- GLANCES_OPT="-w --password"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Uncomment the below line if you want glances to display host OS detail instead of container's

View File

@ -18,36 +18,3 @@ On ARM64, Docker needs to be configured to allow access to the memory stats.
Edit the /boot/firmware/cmdline.txt and add the following configuration key:
cgroup_enable=memory
Netifaces issue ?
-----------------
Previously, Glances uses Netifaces to get network interfaces information.
Now, Glances uses Netifaces2.
Please uninstall Netifaces and install Netifaces2 instead.
Extra note: Glances 4.5 or higher do not use Netifaces/Netifaces2 anymore.
On Debian/Ubuntu Operating Systems, Webserver display a blank screen ?
----------------------------------------------------------------------
For some reason, the Glances Debian/Ubuntu packages do not include the Web UI static files.
Please read: https://github.com/nicolargo/glances/issues/2021 for workaround and more information.
Glances said that my computer has no free memory, is it normal ?
----------------------------------------------------------------
On Linux, Glances shows by default the free memory.
Free memory can be low, it's a "normal" behavior because Linux uses free memory for disk caching
to improve performance. More information can be found here: https://linuxatemyram.com/.
If you want to display the "available" memory instead of the "free" memory, you can uses the
the following configuration key in the Glances configuration file:
[mem]
# Display available memory instead of used memory
available=True

View File

@ -1,46 +0,0 @@
.. _fetch:
Fetch
=====
The fetch mode is used to get and share a quick look of a machine using the
``fetch`` option. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Some examples are provided in the ``conf/fetch-templates/`` directory. Please feel free to
customize them or create your own template (contribution via PR are welcome).
The format of the template is based on the Jinja2 templating engine and can use all the stats
available in Glances through the ``gl`` variable (an instance of the :ref:`Glances Python API<api>`).
For example, the default template is define as:
.. code-block:: jinja
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} {{ '%0.2f'| format(gl.load['min5']) }} {{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} {{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} {{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}{% for net in gl.network.keys() %}📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s ↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} ⚡ {{ process['cpu_percent'] }}% CPU{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }} 🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }} ⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

View File

@ -1,110 +0,0 @@
.. _duckdb:
DuckDB
===========
DuckDB is an in-process SQL OLAP database management system.
You can export statistics to a ``DuckDB`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[duckdb]
# database defines where data are stored, can be one of:
# /path/to/glances.db (see https://duckdb.org/docs/stable/clients/python/dbapi#file-based-connection)
# :memory:glances (see https://duckdb.org/docs/stable/clients/python/dbapi#in-memory-connection)
# Or anyone else supported by the API (see https://duckdb.org/docs/stable/clients/python/dbapi)
database=/tmp/glances.db
and run Glances with:
.. code-block:: console
$ glances --export duckdb
Data model
-----------
The data model is composed of one table per Glances plugin.
Example:
.. code-block:: python
>>> import duckdb
>>> db = duckdb.connect(database='/tmp/glances.db', read_only=True)
>>> db.sql("SELECT * from cpu")
┌─────────────────────┬─────────────────┬────────┬────────┬────────┬───┬────────────────────┬─────────────────────┬──────────────────────┬──────────────────────┬──────────────────────┐
│ time │ hostname_id │ total │ user │ nice │ … │ cpu_iowait_warning │ cpu_iowait_critical │ cpu_ctx_switches_c… │ cpu_ctx_switches_w… │ cpu_ctx_switches_c… │
│ time with time zone │ varchar │ double │ double │ double │ │ double │ double │ double │ double │ double │
├─────────────────────┼─────────────────┼────────┼────────┼────────┼───┼────────────────────┼─────────────────────┼──────────────────────┼──────────────────────┼──────────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ 8.0 │ 5.6 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:27+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:29+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:31+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:33+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:35+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:37+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:39+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:41+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:43+00 │ nicolargo-xps15 │ 12.2 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:32+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:34+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:36+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:38+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:40+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:42+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:44+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:46+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:48+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
├─────────────────────┴─────────────────┴────────┴────────┴────────┴───┴────────────────────┴─────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┤
│ 41 rows (20 shown) 47 columns (10 shown) │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
>>> db.sql("SELECT * from cpu").fetchall()[0]
(datetime.time(11, 50, 25, tzinfo=datetime.timezone.utc), 'nicolargo-xps15', 8.0, 5.6, 0.0, 2.3, 91.9, 0.1, 0.0, 0.0, 0.0, 0, 0, 0, 0, 16, 2.4103684425354004, 90724823, 0, 63323797, 0, 30704572, 0, 0, 0, 1200.0, 65.0, 75.0, 85.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, 5.0, 5.625, 6.25, 640000.0, 720000.0, 800000.0)
>>> db.sql("SELECT * from network")
┌─────────────────────┬─────────────────┬────────────────┬────────────┬────────────┬───┬─────────────────────┬────────────────┬────────────────────┬────────────────────┬───────────────────┐
│ time │ hostname_id │ key_id │ bytes_sent │ bytes_recv │ … │ network_tx_critical │ network_hide │ network_hide_no_up │ network_hide_no_ip │ network_hide_zero │
│ time with time zone │ varchar │ varchar │ int64 │ int64 │ │ double │ varchar │ boolean │ boolean │ boolean │
├─────────────────────┼─────────────────┼────────────────┼────────────┼────────────┼───┼─────────────────────┼────────────────┼────────────────────┼────────────────────┼───────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ interface_name │ 407761 │ 32730 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:27+00 │ nicolargo-xps15 │ interface_name │ 2877 │ 4857 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:29+00 │ nicolargo-xps15 │ interface_name │ 44504 │ 32555 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:31+00 │ nicolargo-xps15 │ interface_name │ 1092285 │ 48600 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:33+00 │ nicolargo-xps15 │ interface_name │ 150119 │ 43805 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:35+00 │ nicolargo-xps15 │ interface_name │ 34424 │ 14825 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:37+00 │ nicolargo-xps15 │ interface_name │ 19382 │ 33614 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:39+00 │ nicolargo-xps15 │ interface_name │ 53060 │ 39780 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:41+00 │ nicolargo-xps15 │ interface_name │ 371914 │ 78626 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:43+00 │ nicolargo-xps15 │ interface_name │ 82356 │ 60612 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ interface_name │ 3766 │ 9977 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:32+00 │ nicolargo-xps15 │ interface_name │ 188036 │ 18668 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:34+00 │ nicolargo-xps15 │ interface_name │ 543 │ 2451 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:36+00 │ nicolargo-xps15 │ interface_name │ 8247 │ 7275 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:38+00 │ nicolargo-xps15 │ interface_name │ 7252 │ 986 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:40+00 │ nicolargo-xps15 │ interface_name │ 172 │ 132 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:42+00 │ nicolargo-xps15 │ interface_name │ 8080 │ 6640 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:44+00 │ nicolargo-xps15 │ interface_name │ 19660 │ 17830 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:46+00 │ nicolargo-xps15 │ interface_name │ 1007030 │ 84170 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:48+00 │ nicolargo-xps15 │ interface_name │ 128947 │ 18087 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
├─────────────────────┴─────────────────┴────────────────┴────────────┴────────────┴───┴─────────────────────┴────────────────┴────────────────────┴────────────────────┴───────────────────┤
│ 41 rows (20 shown) 28 columns (10 shown) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
.. _duckdb: https://duckdb.org/

View File

@ -3,24 +3,8 @@
Gateway To Other Services
=========================
Glances can export stats to files or to other services like databases, message queues, etc.
Each exporter has its own configuration options, which can be set in the Glances
configuration file (`glances.conf`).
A common options section is also available. One example
is the `exclude_fields` option, which allows you to specify
fields that should not be exported:
.. code-block:: ini
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
This section describes the available exporters and how to configure them:
Glances can export stats to a CSV file. Also, it can act as a gateway
to provide stats to multiple services (see list below).
.. toctree::
:maxdepth: 2
@ -30,13 +14,11 @@ This section describes the available exporters and how to configure them:
couchdb
elastic
graph
graphite
influxdb
json
kafka
mqtt
mongodb
nats
opentsdb
prometheus
rabbitmq

View File

@ -1,68 +0,0 @@
.. _nats:
NATS
====
NATS is a message broker.
You can export statistics to a ``NATS`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[nats]
host=nats://localhost:4222
prefix=glances
and run Glances with:
.. code-block:: console
$ glances --export nats
Data model
-----------
Glances stats are published as JSON messages to the following subjects:
<prefix>.<plugin>
Example:
CPU stats are published to glances.cpu
So a simple Python client will subscribe to this subject with:
import asyncio
import nats
async def main():
nc = nats.NATS()
await nc.connect(servers=["nats://localhost:4222"])
future = asyncio.Future()
async def cb(msg):
nonlocal future
future.set_result(msg)
await nc.subscribe("glances.cpu", cb=cb)
# Wait for message to come in
print("Waiting (max 30 seconds) for a message on 'glances' subject...")
msg = await asyncio.wait_for(future, 30)
print(msg.subject, msg.data)
if __name__ == '__main__':
asyncio.run(main())
To subscribe to all Glances stats, use a wildcard:
await nc.subscribe("glances.*", cb=cb)

View File

@ -40,7 +40,7 @@ be added as a column in the table (named key_id) and added to the timescaledb.se
Current limitations
-------------------
Sensors, Fs and DiskIO plugins are not supported by the TimescaleDB exporter.
Sensors and Fs plugins are not supported by the TimescaleDB exporter.
In the cpu plugin, the user field is exported as user_cpu (user_percpu in the percpu plugin)
because user is a reserved keyword in PostgreSQL.

View File

@ -11,11 +11,12 @@ information depending on the terminal size.
It can also work in client/server mode. Remote monitoring can be
done via terminal, Web interface, or API (XMLRPC and RESTful).
Stats can also be exported to :ref:`files or external databases<gw>`.
Glances is written in Python and uses the `psutil`_ library to get
information from your system.
It is also possible to use it in your own Python scripts thanks to
the :ref:`Glances API<api>` or in any other application through
the :ref:`RESTful API<api_restful>`.
Stats can also be exported to external time/value databases.
.. _psutil: https://github.com/giampaolo/psutil
Table of Contents
=================
@ -29,11 +30,7 @@ Table of Contents
config
aoa/index
gw/index
api/python
api/restful
api
docker
faq
support
.. _psutil: https://github.com/giampaolo/psutil

View File

@ -28,7 +28,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.TH "GLANCES" "1" "Jan 03, 2026" "4.4.2_dev1" "Glances"
.TH "GLANCES" "1" "Jul 05, 2025" "4.3.2" "Glances"
.SH NAME
glances \- An eye on your system
.SH SYNOPSIS
@ -522,17 +522,11 @@ Switch between process command line or command name
.B \fBF5\fP or \fBCTRL\-R\fP
Refresh user interface
.TP
.B \fBSHIFT\-LEFT\fP
.B \fBLEFT\fP
Navigation left through the process sort
.TP
.B \fBSHIFT\-RIGHT\fP
Navigation right through the process sort
.TP
.B \fBLEFT\fP
Navigation left through the process name
.TP
.B \fBRIGHT\fP
Navigation right through the process name
Navigation right through the process sort
.TP
.B \fBUP\fP
Up in the processes list
@ -672,7 +666,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restful_api.py is hosted
# Default is folder where glances_restfull_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross\-origin requests.
@ -687,10 +681,6 @@ max_processes_display=25
# Comma separated list of HTTP request headers that should be supported for cross\-origin requests.
# Default is *
#cors_headers=*
# Define SSL files (keyfile_password is optional)
#ssl_keyfile=./glances.local+3\-key.pem
#ssl_keyfile_password=kfp
#ssl_certfile=./glances.local+3.pem
.EE
.UNINDENT
.UNINDENT
@ -963,6 +953,6 @@ If you do not want to see the local Glances Web Server in the browser list pleas
.sp
Nicolas Hennion aka Nicolargo <\X'tty: link mailto:contact@nicolargo.com'\fI\%contact@nicolargo.com\fP\X'tty: link'>
.SH COPYRIGHT
2026, Nicolas Hennion
2025, Nicolas Hennion
.\" Generated by docutils manpage writer.
.

View File

@ -4,12 +4,11 @@ Quickstart
==========
This page gives a good introduction to how to get started with Glances.
Glances offers multiple modes:
Glances offers three modes:
- Standalone
- Client/Server
- Web server
- Fetch
Standalone Mode
---------------
@ -197,7 +196,7 @@ Here's a screenshot from Chrome on Android:
.. image:: _static/screenshot-web2.png
How do you protect your server (or Web server) with a login/password ?
----------------------------------------------------------------------
------------------------------------------------------------------
You can set a password to access the server using the ``--password``.
By default, the login is ``glances`` but you can change it with
@ -223,22 +222,3 @@ file:
# Additionally (and optionally) a default password could be defined
localhost=mylocalhostpassword
default=mydefaultpassword
Fetch mode
----------
It is also possible to get and share a quick look of a machine using the
``fetch`` mode. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Have a look to the :ref:`fetch documentation page<fetch>` to learn how to create your own template.

View File

@ -1,5 +1,4 @@
import json
from unittest.mock import patch
from fastapi.openapi.utils import get_openapi
@ -9,15 +8,13 @@ from glances.main import GlancesMain
from glances.outputs.glances_restful_api import GlancesRestfulApi
# Init Glances core
testargs = ["glances", "-C", "./conf/glances.conf"]
with patch('sys.argv', testargs):
core = GlancesMain()
core = GlancesMain(args_begin_at=2)
test_config = core.get_config()
test_args = core.get_args()
app = GlancesRestfulApi(config=test_config, args=test_args)._app
with open('./docs/api/openapi.json', 'w') as f:
with open('./docs/openapi.json', 'w') as f:
json.dump(
get_openapi(
title=app.title,

View File

@ -1,13 +0,0 @@
import json
from glances.outputs.glances_curses import _GlancesCurses
print(
json.dumps(
{
"topMenu": list(_GlancesCurses._top),
"leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"],
},
indent=4,
)
)

View File

@ -1,523 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "592b8135-c06b-41b7-895e-9dd70787f6ac",
"metadata": {},
"source": [
"# Use Glances API in your Python code"
]
},
{
"cell_type": "markdown",
"id": "e5ec86ae-ce2b-452f-b715-54e746026a96",
"metadata": {},
"source": [
"## Init the Glances API"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ba9b3546-65a0-4eec-942b-1855ff5c5d32",
"metadata": {},
"outputs": [],
"source": [
"from glances import api"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e81ad928-3b61-4654-8589-13cb29e7f292",
"metadata": {},
"outputs": [],
"source": [
"gl = api.GlancesAPI()"
]
},
{
"cell_type": "markdown",
"id": "6ec912a3-0875-4cdb-8539-e84ffb27768a",
"metadata": {},
"source": [
"## Get plugins list"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1ce57a13-a90d-4d65-b4a4-2bc45112697e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['alert',\n",
" 'ports',\n",
" 'diskio',\n",
" 'containers',\n",
" 'processcount',\n",
" 'programlist',\n",
" 'gpu',\n",
" 'percpu',\n",
" 'vms',\n",
" 'system',\n",
" 'network',\n",
" 'cpu',\n",
" 'amps',\n",
" 'processlist',\n",
" 'load',\n",
" 'sensors',\n",
" 'uptime',\n",
" 'now',\n",
" 'connections',\n",
" 'fs',\n",
" 'wifi',\n",
" 'ip',\n",
" 'help',\n",
" 'version',\n",
" 'psutilversion',\n",
" 'core',\n",
" 'mem',\n",
" 'folders',\n",
" 'quicklook',\n",
" 'memswap',\n",
" 'raid']"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.plugins()"
]
},
{
"cell_type": "markdown",
"id": "d5be2964-7a28-4b93-9dd0-1481afd2ee50",
"metadata": {},
"source": [
"## Get CPU stats"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0d1636d2-3f3e-44d4-bb67-45487384f79f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 3.8, 'user': 3.0, 'nice': 0.0, 'system': 0.8, 'idle': 96.1, 'iowait': 0.1, 'irq': 0.0, 'steal': 0.0, 'guest': 0.0, 'ctx_switches': 0, 'interrupts': 0, 'soft_interrupts': 0, 'syscalls': 0, 'cpucore': 16, 'time_since_update': 141.46278643608093, 'ctx_switches_gauge': 12830371, 'ctx_switches_rate_per_sec': 0, 'interrupts_gauge': 9800040, 'interrupts_rate_per_sec': 0, 'soft_interrupts_gauge': 3875931, 'soft_interrupts_rate_per_sec': 0, 'syscalls_gauge': 0, 'syscalls_rate_per_sec': 0}"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "99681a33-045e-43bf-927d-88b15872fad0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3.1"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.cpu.get('total')"
]
},
{
"cell_type": "markdown",
"id": "07e30de4-8f2a-4110-9c43-2a87d91dbf24",
"metadata": {},
"source": [
"## Get MEMORY stats"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "33502d93-acf9-49c5-8bcd-0a0404b47829",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'total': 16422858752, 'available': 6726169136, 'percent': 59.0, 'used': 9696689616, 'free': 541847552, 'active': 8672595968, 'inactive': 5456875520, 'buffers': 354791424, 'cached': 6520318384, 'shared': 729960448}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "404cd8d6-ac38-4830-8ead-4b747e0ca7b1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"6779998768"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.mem.get('available')"
]
},
{
"cell_type": "markdown",
"id": "74e27e9f-3240-4827-a754-3538b7d68119",
"metadata": {},
"source": [
"Display it in a user friendly way:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "fa83b40a-51e8-45fa-b478-d0fcc9de4639",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'6.28G'"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.auto_unit(gl.mem.get('available'))"
]
},
{
"cell_type": "markdown",
"id": "bfaf5b94-7c9c-4fdc-8a91-71f543cafa4b",
"metadata": {},
"source": [
"## Get NETWORK stats"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "a0ab2ce7-e9bd-4a60-9b90-095a9023dac7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'wlp0s20f3': {'bytes_sent': 1130903, 'bytes_recv': 2213272, 'speed': 0, 'key': 'interface_name', 'interface_name': 'wlp0s20f3', 'alias': 'WIFI', 'bytes_all': 3344175, 'time_since_update': 354.35748958587646, 'bytes_recv_gauge': 1108380679, 'bytes_recv_rate_per_sec': 6245.0, 'bytes_sent_gauge': 21062113, 'bytes_sent_rate_per_sec': 3191.0, 'bytes_all_gauge': 1129442792, 'bytes_all_rate_per_sec': 9437.0}}"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network"
]
},
{
"cell_type": "markdown",
"id": "b65f7280-d9f0-4719-9e10-8b78dc414bae",
"metadata": {},
"source": [
"Get the list of networks interfaces:"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "1a55d32a-bd7d-4dfa-b239-8875c01f205e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['wlp0s20f3']"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.keys()"
]
},
{
"cell_type": "markdown",
"id": "8c7e0215-e96a-4f7e-a187-9b7bee1abcf9",
"metadata": {},
"source": [
"Get stats for a specific network interface:"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "9aacfb32-c0e3-4fc7-b1d2-d216e46088cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'bytes_sent': 118799,\n",
" 'bytes_recv': 275052,\n",
" 'speed': 0,\n",
" 'key': 'interface_name',\n",
" 'interface_name': 'wlp0s20f3',\n",
" 'alias': 'WIFI',\n",
" 'bytes_all': 393851,\n",
" 'time_since_update': 46.24822926521301,\n",
" 'bytes_recv_gauge': 1108795793,\n",
" 'bytes_recv_rate_per_sec': 5947.0,\n",
" 'bytes_sent_gauge': 21268464,\n",
" 'bytes_sent_rate_per_sec': 2568.0,\n",
" 'bytes_all_gauge': 1130064257,\n",
" 'bytes_all_rate_per_sec': 8516.0}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3')"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "4f5ae513-6022-4a52-8d6c-e8b62afacc24",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5105.0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.network.get('wlp0s20f3').get('bytes_recv_rate_per_sec')"
]
},
{
"cell_type": "markdown",
"id": "8b0bdbf4-e386-44aa-9585-1d042f0ded5d",
"metadata": {},
"source": [
"## Additional information"
]
},
{
"cell_type": "markdown",
"id": "5c52a0c7-06fb-432a-bdb7-9921f432d5a6",
"metadata": {},
"source": [
"Example for the LOAD plugin."
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "99303a2b-52a3-440f-a896-ad4951a9de34",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': 1.01123046875, 'min5': 0.83447265625, 'min15': 0.76171875, 'cpucore': 16}"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load"
]
},
{
"cell_type": "markdown",
"id": "7a560824-2787-4436-b39b-63de0c455536",
"metadata": {},
"source": [
"Get the limit configured in the glances.conf:"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "cbbc6a81-623f-4eff-9d08-e6a8b5981660",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 1 minute.',\n",
" 'unit': 'float'},\n",
" 'min5': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 5 minutes.',\n",
" 'unit': 'float'},\n",
" 'min15': {'description': 'Average sum of the number of processes waiting in the run-queue plus the number currently executing over 15 minutes.',\n",
" 'unit': 'float'},\n",
" 'cpucore': {'description': 'Total number of CPU core.', 'unit': 'number'}}"
]
},
"execution_count": 34,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.fields_description"
]
},
{
"cell_type": "markdown",
"id": "2bd51d13-77e3-48f0-aa53-af86df6425f8",
"metadata": {},
"source": [
"Get field description and unit:"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "8682edcf-a8b9-424c-976f-2a301a05be6a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'history_size': 1200.0,\n",
" 'load_disable': ['False'],\n",
" 'load_careful': 0.7,\n",
" 'load_warning': 1.0,\n",
" 'load_critical': 5.0}"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_limits()"
]
},
{
"cell_type": "raw",
"id": "3c671ff8-3a0c-48d3-8247-6081c69c19a9",
"metadata": {},
"source": [
"Get current stats views regarding limits:"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "45e03e9b-233c-4359-bcbc-7d2f06aca1c6",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'min1': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min5': {'decoration': 'OK',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'min15': {'decoration': 'OK_LOG',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False},\n",
" 'cpucore': {'decoration': 'DEFAULT',\n",
" 'optional': False,\n",
" 'additional': False,\n",
" 'splittable': False,\n",
" 'hidden': False}}"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"gl.load.get_views()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.14.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -19,8 +19,7 @@ import tracemalloc
# Global name
# Version should start and end with a numerical char
# See https://packaging.python.org/specifications/core-metadata/#version
# Examples: 1.0.0, 1.0.0rc1, 1.1.0_dev1
__version__ = "4.4.2_dev1"
__version__ = "4.3.2"
__apiversion__ = '4'
__author__ = 'Nicolas Hennion <nicolas@nicolargo.com>'
__license__ = 'LGPLv3'
@ -53,10 +52,10 @@ if psutil_version_info < psutil_min_version:
# Trac malloc is only available on Python 3.4 or higher
def __signal_handler(sig, frame):
logger.debug(f"Signal {sig} caught")
# Avoid Glances hang when killing process with multiple CTRL-C See #3264
signal.signal(signal.SIGINT, signal.SIG_IGN)
def __signal_handler(signal, frame):
logger.debug(f"Signal {signal} caught")
end()
@ -96,8 +95,8 @@ def check_memleak(args, mode):
def setup_server_mode(args, mode):
if args.stdout_issue or args.stdout_api_restful_doc or args.stdout_api_doc:
# Serve once for issue and API documentation modes
if args.stdout_issue or args.stdout_apidoc:
# Serve once for issue/test mode
mode.serve_issue()
else:
# Serve forever
@ -105,18 +104,18 @@ def setup_server_mode(args, mode):
def maybe_trace_memleak(args, snapshot_begin):
if args.trace_malloc or args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
if args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
snapshot_diff = snapshot_end.compare_to(snapshot_begin, 'filename')
memory_leak = sum([s.size_diff for s in snapshot_diff])
print(f"Memory consumption: {memory_leak / 1000:.1f}KB (see log for details)")
logger.info("Memory consumption (top 5):")
for stat in snapshot_diff[:5]:
logger.info(stat)
if args.trace_malloc:
elif args.trace_malloc:
# See more options here: https://docs.python.org/3/library/tracemalloc.html
top_stats = snapshot_end.statistics("filename")
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics("filename")
print("[ Trace malloc - Top 10 ]")
for stat in top_stats[:10]:
print(stat)
@ -189,6 +188,3 @@ def main():
# Glances can be ran in standalone, client or server mode
start(config=core.get_config(), args=core.get_args())
# End of glances/__init__.py

View File

@ -66,7 +66,7 @@ class Amp(GlancesAmp):
"""Update the AMP"""
# Get the Nginx status
logger.debug('{}: Update stats using status URL {}'.format(self.NAME, self.get('status_url')))
res = requests.get(self.get('status_url'), timeout=15)
res = requests.get(self.get('status_url'))
if res.ok:
# u'Active connections: 1 \nserver accepts handled requests\n 1 1 1 \nReading: 0 Writing: 1 Waiting: 0 \n'
self.set_result(res.text.rstrip())

View File

@ -1,117 +0,0 @@
#
# Glances - An eye on your system
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
from glances import __version__ as glances_version
from glances.globals import auto_unit, weak_lru_cache
from glances.main import GlancesMain
from glances.outputs.glances_bars import Bar
from glances.processes import sort_stats
from glances.stats import GlancesStats
plugin_dependencies_tree = {
'processlist': ['processcount'],
}
class GlancesAPI:
ttl = 2.0 # Default cache TTL in seconds
def __init__(self, config=None, args=None):
self.__version__ = glances_version.split('.')[0] # Get the major version
core = GlancesMain()
self.args = args if args is not None else core.get_args()
self.config = config if config is not None else core.get_config()
self._stats = GlancesStats(config=self.config, args=self.args)
# Set the cache TTL for the API
self.ttl = self.args.time if self.args.time is not None else self.ttl
# Init the stats of all plugins in order to ensure that rate are computed
self._stats.update()
@weak_lru_cache(maxsize=1, ttl=ttl)
def __getattr__(self, item):
"""Fallback to the stats object for any missing attributes."""
if item in self._stats.getPluginsList():
if item in plugin_dependencies_tree:
# Ensure dependencies are updated before accessing the plugin
for dependency in plugin_dependencies_tree[item]:
self._stats.get_plugin(dependency).update()
# Update the plugin stats
self._stats.get_plugin(item).update()
return self._stats.get_plugin(item)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
def plugins(self):
"""Return the list of available plugins."""
return self._stats.getPluginsList()
def auto_unit(self, number, low_precision=False, min_symbol='K', none_symbol='-'):
"""
Converts a numeric value into a human-readable string with appropriate units.
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
"""
return auto_unit(number, low_precision, min_symbol, none_symbol)
def bar(self, value, size=18, bar_char='', empty_char='', pre_char='', post_char=''):
"""
Generate a progress bar representation for a given value.
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
"""
b = Bar(
size, bar_char=bar_char, empty_char=empty_char, pre_char=pre_char, post_char=post_char, display_value=False
)
b.percent = value
return b.get()
def top_process(self, limit=3, sorted_by='cpu_percent', sorted_by_secondary='memory_percent'):
"""
Returns a list of the top processes sorted by specified criteria.
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
"""
# Exclude glances process from the top list
# because in fetch mode, Glances generate a CPU load
all_but_glances = [
p
for p in self._stats.get_plugin('processlist').get_raw()
if p['cmdline'] and 'glances' not in (p['cmdline'] or ())
]
return sort_stats(all_but_glances, sorted_by=sorted_by, sorted_by_secondary=sorted_by_secondary)[:limit]

View File

@ -19,7 +19,6 @@ from glances.logger import logger
from glances.outputs.glances_curses import GlancesCursesClient
from glances.outputs.glances_stdout import GlancesStdout
from glances.outputs.glances_stdout_csv import GlancesStdoutCsv
from glances.outputs.glances_stdout_fetch import GlancesStdoutFetch
from glances.outputs.glances_stdout_json import GlancesStdoutJson
from glances.stats_client import GlancesStatsClient
from glances.timer import Counter
@ -188,9 +187,6 @@ class GlancesClient:
logger.info(f"Stdout CSV mode is ON, following stats will be displayed: {self.args.stdout_csv}")
# Init screen
self.screen = GlancesStdoutCsv(config=self.config, args=self.args)
elif self.args.stdout_fetch:
logger.info("Fetch mode is ON")
self.screen = GlancesStdoutFetch(config=self.config, args=self.args)
else:
self.screen = GlancesCursesClient(config=self.config, args=self.args)
@ -284,8 +280,8 @@ class GlancesClient:
else:
# In quiet mode, we only wait adapated_refresh seconds
time.sleep(adapted_refresh)
except Exception:
logger.critical("Critical error in client serve_forever loop")
except Exception as e:
logger.critical(f"Critical error in client serve_forever loop: {e}")
self.end()
return self.client_mode

View File

@ -9,7 +9,7 @@
"""CPU percent stats shared between CPU and Quicklook plugins."""
import platform
from typing import TypedDict
from typing import Optional, TypedDict
import psutil
@ -18,184 +18,11 @@ from glances.timer import Timer
__all__ = ["cpu_percent"]
CPU_IMPLEMENTERS = {
0x41: 'ARM Limited',
0x42: 'Broadcom',
0x43: 'Cavium',
0x44: 'DEC',
0x46: 'Fujitsu',
0x48: 'HiSilicon',
0x49: 'Infineon Technologies',
0x4D: 'Motorola/Freescale',
0x4E: 'NVIDIA',
0x50: 'Applied Micro (APM)',
0x51: 'Qualcomm',
0x53: 'Samsung',
0x56: 'Marvell',
0x61: 'Apple',
0x66: 'Faraday',
0x69: 'Intel',
0x6D: 'Microsoft',
0x70: 'Phytium',
0xC0: 'Ampere Computing',
}
CPU_PARTS = {
# ARM Limited (0x41)
0x41: {
0xD03: 'Cortex-A53',
0xD04: 'Cortex-A35',
0xD05: 'Cortex-A55',
0xD06: 'Cortex-A65',
0xD07: 'Cortex-A57',
0xD08: 'Cortex-A72',
0xD09: 'Cortex-A73',
0xD0A: 'Cortex-A75',
0xD0B: 'Cortex-A76',
0xD0C: 'Neoverse N1',
0xD0D: 'Cortex-A77',
0xD0E: 'Cortex-A76AE',
0xD13: 'Cortex-R52',
0xD20: 'Cortex-M23',
0xD21: 'Cortex-M33',
0xD40: 'Neoverse V1',
0xD41: 'Cortex-A78',
0xD42: 'Cortex-A78AE',
0xD43: 'Cortex-A65AE',
0xD44: 'Cortex-X1',
0xD46: 'Cortex-A510',
0xD47: 'Cortex-A710',
0xD48: 'Cortex-X2',
0xD49: 'Neoverse N2',
0xD4A: 'Neoverse E1',
0xD4B: 'Cortex-A78C',
0xD4C: 'Cortex-X1C',
0xD4D: 'Cortex-A715',
0xD4E: 'Cortex-X3',
0xD4F: 'Neoverse V2',
0xD80: 'Cortex-A520',
0xD81: 'Cortex-A720',
0xD82: 'Cortex-X4',
0xD84: 'Neoverse V3',
0xD85: 'Cortex-X925',
0xD87: 'Cortex-A725',
},
# Apple (0x61)
0x61: {
0x000: 'Swift',
0x001: 'Cyclone',
0x002: 'Typhoon',
0x003: 'Twister',
0x004: 'Hurricane',
0x005: 'Monsoon/Mistral',
0x006: 'Vortex/Tempest',
0x007: 'Lightning/Thunder',
0x008: 'Firestorm/Icestorm (M1)',
0x009: 'Avalanche/Blizzard (M2)',
0x00E: 'Everest/Sawtooth (M3)',
0x010: 'Blizzard/Avalanche (A16)',
0x011: 'Coll (M4)',
},
# Qualcomm (0x51)
0x51: {
0x00F: 'Scorpion',
0x02D: 'Scorpion',
0x04D: 'Krait',
0x06F: 'Krait',
0x201: 'Kryo',
0x205: 'Kryo',
0x211: 'Kryo',
0x800: 'Kryo 260/280 Gold (Cortex-A73)',
0x801: 'Kryo 260/280 Silver (Cortex-A53)',
0x802: 'Kryo 385 Gold (Cortex-A75)',
0x803: 'Kryo 385 Silver (Cortex-A55)',
0x804: 'Kryo 485 Gold (Cortex-A76)',
0x805: 'Kryo 485 Silver (Cortex-A55)',
0xC00: 'Falkor',
0xC01: 'Saphira',
},
# Samsung (0x53)
0x53: {
0x001: 'Exynos M1/M2',
0x002: 'Exynos M3',
0x003: 'Exynos M4',
0x004: 'Exynos M5',
},
# NVIDIA (0x4e)
0x4E: {
0x000: 'Denver',
0x003: 'Denver 2',
0x004: 'Carmel',
},
# Marvell (0x56)
0x56: {
0x131: 'Feroceon 88FR131',
0x581: 'PJ4/PJ4b',
0x584: 'PJ4B-MP',
},
# Cavium (0x43)
0x43: {
0x0A0: 'ThunderX',
0x0A1: 'ThunderX 88XX',
0x0A2: 'ThunderX 81XX',
0x0A3: 'ThunderX 83XX',
0x0AF: 'ThunderX2 99xx',
0x0B0: 'OcteonTX2',
0x0B1: 'OcteonTX2 T98',
0x0B2: 'OcteonTX2 T96',
0x0B3: 'OcteonTX2 F95',
0x0B4: 'OcteonTX2 F95N',
0x0B5: 'OcteonTX2 F95MM',
},
# Broadcom (0x42)
0x42: {
0x00F: 'Brahma B15',
0x100: 'Brahma B53',
0x516: 'Vulcan',
},
# HiSilicon (0x48)
0x48: {
0xD01: 'Kunpeng-920',
0xD40: 'Cortex-A76 (Kirin)',
},
# Ampere (0xc0)
0xC0: {
0xAC3: 'Ampere-1',
0xAC4: 'Ampere-1a',
},
# Fujitsu (0x46)
0x46: {
0x001: 'A64FX',
},
# Intel (0x69) - ARM-based chips
0x69: {
0x200: 'i80200',
0x210: 'PXA250A',
0x212: 'PXA210A',
0x242: 'i80321-400',
0x243: 'i80321-600',
0x290: 'PXA250B/PXA26x',
0x292: 'PXA210B',
0x2C2: 'i80321-400-B0',
0x2C3: 'i80321-600-B0',
0x2D0: 'PXA250C/PXA255/PXA26x',
0x2D2: 'PXA210C',
0x411: 'PXA27x',
0x41C: 'IPX425-533',
0x41D: 'IPX425-400',
0x41F: 'IPX425-266',
0x682: 'PXA32x',
0x683: 'PXA930/PXA935',
0x688: 'PXA30x',
0x689: 'PXA31x',
},
}
class CpuInfo(TypedDict):
cpu_name: str
cpu_hz: float | None
cpu_hz_current: float | None
cpu_hz: Optional[float]
cpu_hz_current: Optional[float]
class PerCpuPercentInfo(TypedDict):
@ -205,15 +32,15 @@ class PerCpuPercentInfo(TypedDict):
user: float
system: float
idle: float
nice: float | None
iowait: float | None
irq: float | None
softirq: float | None
steal: float | None
guest: float | None
guest_nice: float | None
dpc: float | None
interrupt: float | None
nice: Optional[float]
iowait: Optional[float]
irq: Optional[float]
softirq: Optional[float]
steal: Optional[float]
guest: Optional[float]
guest_nice: Optional[float]
dpc: Optional[float]
interrupt: Optional[float]
class CpuPercent:
@ -257,7 +84,7 @@ class CpuPercent:
self.cpu_info['cpu_hz_current'] = cpu_freq.current
else:
self.cpu_info['cpu_hz_current'] = None
if hasattr(cpu_freq, 'max') and cpu_freq.max != 0.0:
if hasattr(cpu_freq, 'max'):
self.cpu_info['cpu_hz'] = cpu_freq.max
else:
self.cpu_info['cpu_hz'] = None
@ -276,18 +103,9 @@ class CpuPercent:
logger.debug("No permission to read '/proc/cpuinfo'")
return ret
cpu_implementer = None
for line in cpuinfo_lines:
# Look for the CPU name
if line.startswith('model name') or line.startswith('Model') or line.startswith('cpu model'):
return line.split(':')[1].strip()
# Look for the CPU name on ARM architecture (see #3127)
if line.startswith('CPU implementer'):
cpu_implementer = CPU_IMPLEMENTERS.get(int(line.split(':')[1].strip(), 16), ret)
ret = cpu_implementer
if line.startswith('CPU part') and cpu_implementer in CPU_PARTS:
cpu_part = CPU_PARTS[cpu_implementer].get(int(line.split(':')[1].strip(), 16), 'Unknown')
ret = f'{cpu_implementer} {cpu_part}'
return ret

View File

@ -271,7 +271,6 @@ class GlancesEventsList:
event_time, event_index, event_state, event_type, event_value, proc_list, proc_desc, global_message
)
# logger.info(self.events_list)
return self.len()
def _create_event(self, event_time, event_state, event_type, event_value, proc_desc, global_message):

View File

@ -1,7 +1,7 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2026 Nicolas Hennion <nicolas@nicolargo.com>
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
@ -11,8 +11,6 @@ I am your father...
...for all Glances exports IF.
"""
import re
from glances.globals import NoOptionError, NoSectionError, json_dumps
from glances.logger import logger
from glances.timer import Counter
@ -55,10 +53,6 @@ class GlancesExport:
# Fields description
self._fields_description = None
# Load the default common export configuration
if self.config is not None:
self.load_common_conf()
def _log_result_decorator(fct):
"""Log (DEBUG) the result of the function fct."""
@ -77,24 +71,6 @@ class GlancesExport:
"""Close the export module."""
logger.debug(f"Finalise export interface {self.export_name}")
def load_common_conf(self):
"""Load the common export configuration in the Glances configuration file.
:returns: Boolean -- True if section is found
"""
# Read the common [export] section
section = "export"
opt = "exclude_fields"
try:
setattr(self, opt, self.config.get_list_value(section, opt))
except NoOptionError:
logger.debug(f"{opt} option not found in the {section} configuration section")
logger.debug(f"Load common {section} from the Glances configuration file")
return True
def load_conf(self, section, mandatories=["host", "port"], options=None):
"""Load the export <section> configuration in the Glances configuration file.
@ -125,7 +101,7 @@ class GlancesExport:
try:
setattr(self, opt, self.config.get_value(section, opt))
except NoOptionError:
logger.debug(f"{opt} option not found in the {section} configuration section")
pass
logger.debug(f"Load {section} from the Glances configuration file")
logger.debug(f"{section} parameters: { ({opt: getattr(self, opt) for opt in mandatories + options}) }")
@ -152,7 +128,7 @@ class GlancesExport:
d_tags = {}
if tags:
try:
d_tags = dict(x.split(":", 1) for x in tags.split(","))
d_tags = dict([x.split(":") for x in tags.split(",")])
except ValueError:
# one of the 'key:value' pairs was missing
logger.info("Invalid tags passed: %s", tags)
@ -223,10 +199,6 @@ class GlancesExport:
ret.append({"measurement": name, "tags": tags, "fields": fields})
return ret
def is_excluded(self, field):
"""Return true if the field is excluded."""
return any(re.fullmatch(i, field, re.I) for i in (getattr(self, 'exclude_fields') or ()))
def plugins_to_export(self, stats):
"""Return the list of plugins to export.
@ -294,7 +266,7 @@ class GlancesExport:
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if "key" in stats and stats["key"] in stats:
if "key" in stats.keys() and stats["key"] in stats.keys():
pre_key = "{}.".format(stats[stats["key"]])
else:
pre_key = ""
@ -313,8 +285,6 @@ class GlancesExport:
export_values += item_values
else:
# We are on a simple value
if self.is_excluded(pre_key + key.lower()):
continue
export_names.append(pre_key + key.lower())
export_values.append(value)
elif isinstance(stats, list):

View File

@ -1,165 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2026 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""
I am your son...
...abstract class for AsyncIO-based Glances exports.
"""
import asyncio
import threading
import time
from abc import abstractmethod
from glances.exports.export import GlancesExport
from glances.logger import logger
class GlancesExportAsyncio(GlancesExport):
    """Abstract class for AsyncIO-based export modules.

    This class manages a persistent event loop in a background thread,
    allowing child classes to use AsyncIO operations for exporting data.

    Child classes must implement:
    - async _async_init(): AsyncIO initialization (e.g., connection setup)
    - async _async_exit(): AsyncIO cleanup (e.g., disconnection)
    - async _async_export(name, columns, points): AsyncIO export operation
    """

    def __init__(self, config=None, args=None):
        """Init the AsyncIO export interface.

        Spawns the background event-loop thread, waits (up to 10s) for the
        loop to come up, then runs the child's _async_init() on that loop.

        :raises RuntimeError: if the loop thread does not start in time,
            fails with an exception, or leaves self.loop unset.
        """
        super().__init__(config=config, args=args)

        # AsyncIO event loop management
        self.loop = None
        self._loop_ready = threading.Event()  # set once the loop is created (or failed)
        self._loop_exception = None  # exception raised while creating/running the loop
        self._shutdown = False  # set by exit() to reject late export() calls

        # Start the background event loop thread
        # daemon=True so this thread never blocks interpreter exit
        self._loop_thread = threading.Thread(target=self._run_event_loop, daemon=True)
        self._loop_thread.start()

        # Wait for the loop to be ready
        if not self._loop_ready.wait(timeout=10):
            raise RuntimeError("AsyncIO event loop failed to start within timeout")

        if self._loop_exception:
            raise RuntimeError(f"AsyncIO event loop creation failed: {self._loop_exception}")

        if self.loop is None:
            raise RuntimeError("AsyncIO event loop is None after initialization")

        # Call child class AsyncIO initialization
        # A failure here is logged but NOT fatal: the child is expected to
        # retry/reconnect in the background (see the warning message below).
        future = asyncio.run_coroutine_threadsafe(self._async_init(), self.loop)
        try:
            future.result(timeout=10)
            logger.debug(f"{self.export_name} AsyncIO export initialized successfully")
        except Exception as e:
            logger.warning(f"{self.export_name} AsyncIO initialization failed: {e}. Will retry in background.")

    def _run_event_loop(self):
        """Run event loop in background thread.

        Creates a fresh event loop, signals readiness via self._loop_ready,
        and blocks in run_forever() until exit() stops the loop. On the way
        out, cancels any still-pending tasks and closes the loop.
        """
        try:
            self.loop = asyncio.new_event_loop()
            asyncio.set_event_loop(self.loop)
            self._loop_ready.set()
            self.loop.run_forever()
        except Exception as e:
            # Record the failure and unblock __init__ which is waiting on the event
            self._loop_exception = e
            self._loop_ready.set()
            logger.error(f"{self.export_name} AsyncIO event loop thread error: {e}")
        finally:
            # Clean up pending tasks
            pending = asyncio.all_tasks(self.loop)
            for task in pending:
                task.cancel()
            if pending:
                self.loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
            self.loop.close()

    @abstractmethod
    async def _async_init(self):
        """AsyncIO initialization method.

        Child classes should implement this method to perform AsyncIO-based
        initialization such as connecting to servers, setting up clients, etc.

        This method is called once during __init__ after the event loop is ready.
        """
        pass

    @abstractmethod
    async def _async_exit(self):
        """AsyncIO cleanup method.

        Child classes should implement this method to perform AsyncIO-based
        cleanup such as disconnecting from servers, closing clients, etc.

        This method is called during exit() before stopping the event loop.
        """
        pass

    @abstractmethod
    async def _async_export(self, name, columns, points):
        """AsyncIO export method.

        Child classes must implement this method to perform the actual
        export operation using AsyncIO.

        :param name: plugin name
        :param columns: list of column names
        :param points: list of values corresponding to columns
        """
        pass

    def exit(self):
        """Close the AsyncIO export module.

        Runs the child's _async_exit() on the loop (5s budget), then stops
        the loop and gives the background thread a moment to unwind.
        """
        super().exit()
        self._shutdown = True
        logger.info(f"{self.export_name} AsyncIO export shutting down")

        # Call child class cleanup
        if self.loop:
            future = asyncio.run_coroutine_threadsafe(self._async_exit(), self.loop)
            try:
                future.result(timeout=5)
            except Exception as e:
                logger.error(f"{self.export_name} Error in AsyncIO cleanup: {e}")

        # Stop the event loop
        if self.loop:
            self.loop.call_soon_threadsafe(self.loop.stop)
            # Grace period so the loop thread can finish its cleanup
            time.sleep(0.5)

        logger.debug(f"{self.export_name} AsyncIO export shutdown complete")

    def export(self, name, columns, points):
        """Export data using AsyncIO.

        This method bridges the synchronous export() interface with
        the AsyncIO _async_export() implementation.
        """
        if self._shutdown:
            logger.debug(f"{self.export_name} Export called during shutdown, skipping")
            return
        if not self.loop or not self.loop.is_running():
            logger.error(f"{self.export_name} AsyncIO event loop is not running")
            return

        # Submit the export operation to the background event loop
        try:
            future = asyncio.run_coroutine_threadsafe(self._async_export(name, columns, points), self.loop)
            # Don't block forever - use a short timeout
            # NOTE(review): future is a concurrent.futures.Future; on Python < 3.11
            # its result() raises concurrent.futures.TimeoutError, which is distinct
            # from asyncio.TimeoutError and therefore lands in the generic handler
            # below — TODO confirm the intended branch on older interpreters.
            future.result(timeout=1)
        except asyncio.TimeoutError:
            logger.warning(f"{self.export_name} AsyncIO export timeout for {name}")
        except Exception as e:
            logger.error(f"{self.export_name} AsyncIO export error for {name}: {e}", exc_info=True)

View File

@ -12,7 +12,7 @@
# How to test ?
#
# 1) docker run -d -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=admin -p 5984:5984 --name my-couchdb couchdb
# 2) .venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 2) ./venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 3) Result can be seen at: http://127.0.0.1:5984/_utils
#

View File

@ -1,195 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""DuckDB interface class."""
import sys
import time
from datetime import datetime
from platform import node
import duckdb
from glances.exports.export import GlancesExport
from glances.logger import logger
# Define the type conversions for DuckDB
# https://duckdb.org/docs/stable/clients/python/conversion
convert_types = {
'bool': 'BOOLEAN',
'int': 'BIGINT',
'float': 'DOUBLE',
'str': 'VARCHAR',
'tuple': 'VARCHAR', # Store tuples as VARCHAR (comma-separated)
'list': 'VARCHAR', # Store lists as VARCHAR (comma-separated)
'NoneType': 'VARCHAR',
}
class Export(GlancesExport):
    """This class manages the DuckDB export module."""

    def __init__(self, config=None, args=None):
        """Init the DuckDB export IF.

        Reads the [duckdb] section of the Glances configuration file.
        Mandatory key: database. Optional keys: user, password, hostname.
        Exits the process if the section is missing.
        """
        super().__init__(config=config, args=args)

        # Mandatory configuration keys (additional to host and port)
        self.db = None

        # Optional configuration keys
        self.user = None
        self.password = None
        self.hostname = None

        # Load the configuration file
        self.export_enable = self.load_conf(
            'duckdb', mandatories=['database'], options=['user', 'password', 'hostname']
        )
        if not self.export_enable:
            exit('Missing DuckDB config')

        # The hostname is always add as an identifier in the DuckDB table
        # so we can filter the stats by hostname
        # Default: short host name of the current machine
        self.hostname = self.hostname or node().split(".")[0]

        # Init the DuckDB client
        self.client = self.init()

    def init(self):
        """Init the connection to the DuckDB server.

        :returns: a duckdb connection object, or None if export is disabled.
        Exits the process (code 2) if the connection fails.
        """
        if not self.export_enable:
            return None

        try:
            db = duckdb.connect(database=self.database)
        except Exception as e:
            logger.critical(f"Cannot connect to DuckDB {self.database} ({e})")
            sys.exit(2)
        else:
            logger.info(f"Stats will be exported to DuckDB: {self.database}")

        return db

    def normalize(self, value):
        """Normalize a value before insertion.

        Only special case handled here: a one-element list containing the
        string 'True' or 'False' is converted to a real boolean. Everything
        else is returned unchanged.
        """
        # Nothing to do...
        if isinstance(value, list) and len(value) == 1 and value[0] in ['True', 'False']:
            return bool(value[0])
        return value

    def update(self, stats):
        """Update the DuckDB export module.

        Builds, for each exported plugin, the column definition list and the
        row value list(s), then delegates to export().

        :param stats: the Glances stats object
        :returns: True on success, False if export is disabled.
        """
        if not self.export_enable:
            return False

        # Get all the stats & limits
        # Current limitation with sensors and fs plugins because fields list is not the same
        self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
        all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
        all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())

        # Loop over plugins to export
        for plugin in self.last_exported_list():
            # Remove some fields
            # Merge the plugin limits into the stats so they are exported too
            if isinstance(all_stats[plugin], dict):
                all_stats[plugin].update(all_limits[plugin])
                # Remove the <plugin>_disable field
                all_stats[plugin].pop(f"{plugin}_disable", None)
            elif isinstance(all_stats[plugin], list):
                for i in all_stats[plugin]:
                    i.update(all_limits[plugin])
                    # Remove the <plugin>_disable field
                    i.pop(f"{plugin}_disable", None)
            else:
                continue

            plugin_stats = all_stats[plugin]
            creation_list = []  # List used to create the DuckDB table
            values_list = []  # List of values to insert (list of lists, one list per row)
            if isinstance(plugin_stats, dict):
                # Plugin stats is a single dict -> one row per update
                # Create the list to create the table
                creation_list.append('time TIMETZ')
                creation_list.append('hostname_id VARCHAR')
                for key, value in plugin_stats.items():
                    creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
                # Create the list of values to insert
                item_list = []
                item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
                item_list.append(self.normalize(f"{self.hostname}"))
                item_list.extend([self.normalize(value) for value in plugin_stats.values()])
                values_list = [item_list]
            elif isinstance(plugin_stats, list) and len(plugin_stats) > 0 and 'key' in plugin_stats[0]:
                # Plugin stats is a list of dicts keyed by 'key' -> one row per item
                # Column layout is taken from the FIRST item; assumes all items
                # share the same fields (sensors/fs were filtered out above).
                # Create the list to create the table
                creation_list.append('time TIMETZ')
                creation_list.append('hostname_id VARCHAR')
                creation_list.append('key_id VARCHAR')
                for key, value in plugin_stats[0].items():
                    creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
                # Create the list of values to insert
                for plugin_item in plugin_stats:
                    item_list = []
                    item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
                    item_list.append(self.normalize(f"{self.hostname}"))
                    item_list.append(self.normalize(f"{plugin_item.get('key')}"))
                    item_list.extend([self.normalize(value) for value in plugin_item.values()])
                    values_list.append(item_list)
            else:
                continue

            # Export stats to DuckDB
            self.export(plugin, creation_list, values_list)

        return True

    def export(self, plugin, creation_list, values_list):
        """Export the stats to the DuckDB server.

        Creates the per-plugin table on first use (column definitions from
        creation_list), then inserts every row of values_list using a
        parameterized INSERT.
        """
        logger.debug(f"Export {plugin} stats to DuckDB")

        # Create the table if it does not exist
        table_list = [t[0] for t in self.client.sql("SHOW TABLES").fetchall()]
        if plugin not in table_list:
            # Execute the create table query
            create_query = f"""
CREATE TABLE {plugin} (
    {', '.join(creation_list)}
);"""
            logger.debug(f"Create table: {create_query}")
            try:
                self.client.execute(create_query)
            except Exception as e:
                logger.error(f"Cannot create table {plugin}: {e}")
                return
            # Commit the changes
            self.client.commit()

        # Insert values into the table
        for values in values_list:
            insert_query = f"""
INSERT INTO {plugin} VALUES (
    {', '.join(['?' for _ in values])}
);"""
            logger.debug(f"Insert values into table {plugin}: {values}")
            try:
                self.client.execute(insert_query, values)
            except Exception as e:
                logger.error(f"Cannot insert data into table {plugin}: {e}")

        # Commit the changes
        self.client.commit()

    def exit(self):
        """Close the DuckDB export module."""
        # Force last write
        self.client.commit()
        # Close the DuckDB client
        time.sleep(3)  # Wait a bit to ensure all data is written
        self.client.close()
        # Call the father method
        super().exit()

0
glances/exports/glances_mqtt/__init__.py Normal file → Executable file
View File

View File

@ -1,134 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2026 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""NATS interface class."""
from nats.aio.client import Client as NATS
from nats.errors import ConnectionClosedError
from nats.errors import TimeoutError as NatsTimeoutError
from glances.exports.export_asyncio import GlancesExportAsyncio
from glances.globals import json_dumps
from glances.logger import logger
class Export(GlancesExportAsyncio):
    """This class manages the NATS export module."""

    def __init__(self, config=None, args=None):
        """Init the NATS export IF.

        Reads the [nats] section of the Glances configuration file.
        Mandatory key: host (comma-separated list of NATS server URLs).
        Optional key: prefix (subject prefix, default 'glances').
        Exits the process if the section is missing.
        """
        # Load the NATS configuration file before calling super().__init__
        # because super().__init__ will call _async_init() which needs config
        self.config = config
        self.args = args
        self.export_name = self.__class__.__module__
        export_enable = self.load_conf(
            'nats',
            mandatories=['host'],
            options=['prefix'],
        )
        if not export_enable:
            exit('Missing NATS config')
        self.prefix = self.prefix or 'glances'

        # Host is a comma-separated list of NATS servers
        self.hosts = self.host

        # NATS-specific attributes
        self.client = None
        self._connected = False
        self._publish_count = 0  # total successfully published messages (for logging)

        # Call parent __init__ which will start event loop and call _async_init()
        super().__init__(config=config, args=args)

        # Restore export_enable after super().__init__() resets it to False
        self.export_enable = export_enable

    async def _async_init(self):
        """Connect to NATS with error handling.

        Closes any previous client, then connects to the configured server
        list with automatic reconnection (2s wait, up to 60 attempts) and
        registers the error/disconnect/reconnect callbacks below.

        :raises Exception: re-raises any connection failure after logging it.
        """
        try:
            if self.client:
                try:
                    await self.client.close()
                except Exception as e:
                    logger.debug(f"NATS Error closing existing client: {e}")

            self.client = NATS()
            logger.debug(f"NATS Connecting to servers: {self.hosts}")

            # Configure with reconnection callbacks
            await self.client.connect(
                servers=[s.strip() for s in self.hosts.split(',')],
                reconnect_time_wait=2,
                max_reconnect_attempts=60,
                error_cb=self._error_callback,
                disconnected_cb=self._disconnected_callback,
                reconnected_cb=self._reconnected_callback,
            )
            self._connected = True
            logger.info(f"NATS Successfully connected to servers: {self.hosts}")
        except Exception as e:
            self._connected = False
            logger.error(f"NATS connection error: {e}")
            raise

    async def _error_callback(self, e):
        """Called when NATS client encounters an error."""
        logger.error(f"NATS error callback: {e}")

    async def _disconnected_callback(self):
        """Called when disconnected from NATS."""
        self._connected = False
        logger.debug("NATS disconnected callback")

    async def _reconnected_callback(self):
        """Called when reconnected to NATS."""
        self._connected = True
        logger.debug("NATS reconnected callback")

    async def _async_exit(self):
        """Disconnect from NATS.

        Drains pending messages before closing so nothing in flight is lost.
        """
        try:
            if self.client and self._connected:
                await self.client.drain()
                await self.client.close()
                self._connected = False
                logger.debug(f"NATS disconnected cleanly. Total messages published: {self._publish_count}")
        except Exception as e:
            logger.error(f"NATS Error in disconnect: {e}")

    async def _async_export(self, name, columns, points):
        """Write the points to NATS using AsyncIO.

        Publishes one JSON message per plugin on subject '<prefix>.<name>'.

        :param name: plugin name (used as subject suffix)
        :param columns: list of field names
        :param points: list of values (zipped with columns into the payload)
        """
        if not self._connected:
            logger.warning("NATS not connected, skipping export")
            return

        subject_name = f"{self.prefix}.{name}"
        subject_data = dict(zip(columns, points))

        # Publish data to NATS
        try:
            if not self._connected:
                raise ConnectionClosedError("NATS Not connected to server")
            await self.client.publish(subject_name, json_dumps(subject_data))
            # Flush to force delivery and surface connection errors promptly
            await self.client.flush(timeout=2.0)
            self._publish_count += 1
        except (ConnectionClosedError, NatsTimeoutError) as e:
            # Mark as disconnected so the next export is skipped until the
            # client's reconnect logic fires _reconnected_callback
            self._connected = False
            logger.error(f"NATS publish failed for {subject_name}: {e}")
            raise
        except Exception as e:
            logger.error(f"NATS Unexpected error publishing {subject_name}: {e}", exc_info=True)
            raise


# End of glances/exports/glances_nats/__init__.py

View File

@ -43,9 +43,6 @@ class Export(GlancesExport):
# Perhaps a better method is possible...
self._metric_dict = {}
# Keys name (compute in update() method)
self.keys_name = {}
# Init the Prometheus Exporter
self.init()
@ -59,41 +56,29 @@ class Export(GlancesExport):
else:
logger.info(f"Start Prometheus exporter on {self.host}:{self.port}")
def update(self, stats):
self.keys_name = {k: stats.get_plugin(k).get_key() for k in stats.getPluginsList()}
super().update(stats)
def export(self, name, columns, points):
"""Write the points to the Prometheus exporter using Gauge."""
logger.debug(f"Export {name} stats to Prometheus exporter")
# Remove non number stats and convert all to float (for Boolean)
data = {str(k): float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
data = {k: float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
# Write metrics to the Prometheus exporter
for metric, value in data.items():
labels = self.labels
metric_name = self.prefix + self.METRIC_SEPARATOR + name + self.METRIC_SEPARATOR
try:
obj, stat = metric.split('.')
metric_name += stat
labels += f",{self.keys_name.get(name)}:{obj}"
except ValueError:
metric_name += metric
for k, v in data.items():
# Prometheus metric name: prefix_<glances stats name>
metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
# Prometheus is very sensible to the metric name
# See: https://prometheus.io/docs/practices/naming/
for c in ' .-/:[]':
metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
# Get the labels
labels = self.parse_tags(labels)
labels = self.parse_tags(self.labels)
# Manage an internal dict between metric name and Gauge
if metric_name not in self._metric_dict:
self._metric_dict[metric_name] = Gauge(metric_name, "", labelnames=listkeys(labels))
self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
# Write the value
if hasattr(self._metric_dict[metric_name], 'labels'):
# Add the labels (see issue #1255)
self._metric_dict[metric_name].labels(**labels).set(value)
self._metric_dict[metric_name].labels(**labels).set(v)
else:
self._metric_dict[metric_name].set(value)
self._metric_dict[metric_name].set(v)

View File

@ -54,7 +54,7 @@ class Export(GlancesExport):
# One complete loop have been done
logger.debug(f"Export stats ({listkeys(self.buffer)}) to RESTful endpoint ({self.client})")
# Export stats
post(self.client, json=self.buffer, allow_redirects=True, timeout=15)
post(self.client, json=self.buffer, allow_redirects=True)
# Reset buffer
self.buffer = {}

View File

@ -83,9 +83,6 @@ class Export(GlancesExport):
if isinstance(value, bool):
return str(value).upper()
if isinstance(value, (list, tuple)):
# Special case for list of one boolean
if len(value) == 1 and isinstance(value[0], bool):
return str(value[0]).upper()
return ', '.join([f"'{v}'" for v in value])
if isinstance(value, str):
return f"'{value}'"
@ -98,8 +95,8 @@ class Export(GlancesExport):
return False
# Get all the stats & limits
# @TODO: Current limitation with sensors, fs and diskio plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs', 'diskio']]
# Current limitation with sensors and fs plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())
@ -162,9 +159,6 @@ class Export(GlancesExport):
continue
# Export stats to TimescaleDB
# logger.info(plugin)
# logger.info(f"Segmented by: {segmented_by}")
# logger.info(list(zip(creation_list, values_list[0])))
self.export(plugin, creation_list, segmented_by, values_list)
return True

View File

@ -17,12 +17,10 @@ import base64
import errno
import functools
import importlib
import multiprocessing
import os
import platform
import queue
import re
import socket
import subprocess
import sys
import weakref
@ -36,8 +34,6 @@ from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
from urllib.request import Request, urlopen
import psutil
# Prefer faster libs for JSON (de)serialization
# Preference Order: orjson > ujson > json (builtin)
try:
@ -100,11 +96,6 @@ viewkeys = methodcaller('keys')
viewvalues = methodcaller('values')
viewitems = methodcaller('items')
# Multiprocessing start method (on POSIX system)
if LINUX or BSD or SUNOS or MACOS:
ctx_mp_fork = multiprocessing.get_context('fork')
else:
ctx_mp_fork = multiprocessing.get_context()
###################
# GLOBALS FUNCTIONS
@ -136,6 +127,18 @@ def listvalues(d):
return list(d.values())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of mapping d."""
    items_view = d.items()
    return iter(items_view)
def iterkeys(d):
    """Return an iterator over the keys of mapping d."""
    keys_view = d.keys()
    return iter(keys_view)
def itervalues(d):
    """Return an iterator over the values of mapping d."""
    values_view = d.values()
    return iter(values_view)
def u(s, errors='replace'):
if isinstance(s, text_type):
return s
@ -367,18 +370,11 @@ def json_dumps(data) -> bytes:
return b(res)
def json_loads(data: str | bytes | bytearray) -> dict | list:
def json_loads(data: Union[str, bytes, bytearray]) -> Union[dict, list]:
"""Load a JSON buffer into memory as a Python object"""
return json.loads(data)
def list_to_dict(data):
    """Convert a list of dict (with key in 'key') to a dict with key as key and value as value.

    Items without a 'key' field are silently dropped.
    Returns None when data is not a list.
    """
    if isinstance(data, list):
        result = {}
        for entry in data:
            if 'key' in entry:
                result[entry[entry['key']]] = entry
        return result
    return None
def dictlist(data, item):
if isinstance(data, dict):
try:
@ -404,7 +400,7 @@ def dictlist_json_dumps(data, item):
return json_dumps(dl)
def dictlist_first_key_value(data: list[dict], key, value) -> dict | None:
def dictlist_first_key_value(data: list[dict], key, value) -> Optional[dict]:
"""In a list of dict, return first item where key=value or none if not found."""
try:
ret = next(item for item in data if key in item and item[key] == value)
@ -413,65 +409,6 @@ def dictlist_first_key_value(data: list[dict], key, value) -> dict | None:
return ret
def auto_unit(number, low_precision=False, min_symbol='K', none_symbol='-'):
    """Format a raw number as a short human-readable string (binary units).

    The number of decimal places grows as the scaled value approaches 1:

    CASE: 613421788        RESULT: 585M   low_precision: 585M
    CASE: 5307033647       RESULT: 4.94G  low_precision: 4.9G
    CASE: 44968414685      RESULT: 41.9G  low_precision: 41.9G
    CASE: 838471403472     RESULT: 781G   low_precision: 781G
    CASE: 9683209690677    RESULT: 8.81T  low_precision: 8.8T
    CASE: 1073741824       RESULT: 1024M  low_precision: 1024M
    CASE: 1181116006       RESULT: 1.10G  low_precision: 1.1G

    :param number: value to format (None allowed)
    :param low_precision: trade decimal places for readability (default False)
    :param min_symbol: smallest unit symbol to use (default 'K'); values below
        that threshold are printed unscaled
    :param none_symbol: string returned when number is None (default '-')
    """
    if number is None:
        return none_symbol
    if number == 0:
        # Avoid rendering '0.0'
        return '0'

    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    if min_symbol in units:
        units = units[units.index(min_symbol):]
    # Binary prefixes: K = 2**10, M = 2**20, ... Y = 2**80
    scale = {s: 1 << (10 * (idx + 1)) for idx, s in enumerate(('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'))}

    # Unscaled fallback precision: floats keep 2 decimals, ints none
    digits = 2 if isinstance(number, float) else 0
    for unit in reversed(units):
        scaled = float(number) / scale[unit]
        if scaled <= 1:
            # Too small for this unit, try the next smaller one
            continue
        if scaled < 10:
            digits = 2
        elif scaled < 100:
            digits = 1
        else:
            digits = 0
        if low_precision:
            # Drop decimals entirely for K/M, cap at one decimal above that
            digits = 0 if unit in 'MK' else min(1, digits)
        elif unit in 'K':
            digits = 0
        return '{:.{p}f}{u}'.format(scaled, p=digits, u=unit)

    # No unit applied: print the raw number with the fallback precision
    return f'{number:.{digits}f}'
def string_value_to_float(s):
"""Convert a string with a value and an unit to a float.
Example:
@ -594,181 +531,3 @@ def atoi(text):
def natural_keys(text):
"""Return a text in a natural/human readable format."""
return [atoi(c) for c in re.split(r'(\d+)', text)]
def exit_after(seconds, default=None):
    """Exit the function if it takes more than 'seconds' seconds to complete.

    In this case, return the value of 'default' (default: None).

    Implementation: on Linux the decorated function is run in a forked
    subprocess (ctx_mp_fork) and its result is passed back through a queue;
    the parent waits at most 'seconds' and kills the child on timeout.
    On non-Linux platforms the decorator is a no-op.
    """

    def handler(q, func, args, kwargs):
        # Runs in the child process: push the result back to the parent
        q.put(func(*args, **kwargs))

    def decorator(func):
        if not LINUX:
            # Timeout feature only supported on Linux (fork start method)
            return func

        def wraps(*args, **kwargs):
            try:
                q = ctx_mp_fork.Queue()
            except PermissionError:
                # Manage an exception in Snap packages on Linux
                # The strict mode prevent the use of multiprocessing.Queue()
                # There is a "dirty" hack:
                # https://forum.snapcraft.io/t/python-multiprocessing-permission-denied-in-strictly-confined-snap/15518/2
                # But i prefer to just disable the timeout feature in this case
                # NOTE(review): this branch does not return func's result
                # (the wrapper falls through to None) — TODO confirm intended
                func(*args, **kwargs)
            else:
                p = ctx_mp_fork.Process(target=handler, args=(q, func, args, kwargs))
                p.start()
                p.join(timeout=seconds)
                if not p.is_alive():
                    # Child finished in time: fetch its result from the queue
                    return q.get()
                p.terminate()
                p.join(timeout=0.1)
                if p.is_alive():
                    # Kill in case processes doesn't terminate
                    # Happens with cases like broken NFS connections
                    p.kill()
                return default

        return wraps

    return decorator
def split_esc(input_string, sep=None, maxsplit=-1, esc='\\'):
    """
    Return a list of the substrings in the input_string, using sep as the separator char
    and esc as the escape character.

    sep
      The separator used to split the input_string.
      When set to None (the default value), will split on any whitespace
      character (including \n \r \t \f and spaces) unless the character is escaped
      and will discard empty strings from the result.
    maxsplit
      Maximum number of splits.
      -1 (the default value) means no limit.
    esc
      The character used to escape the separator.
      When set to None, this behaves equivalently to `str.split`.
      Defaults to '\\\\' i.e. backslash.

    Splitting starts at the front of the input_string and works to the end.

    Note: escape characters in the substrings returned are removed. However, if
    maxsplit is reached, escape characters in the remaining, unprocessed substring
    are not removed, which allows split_esc to be called on it again.

    :raises TypeError: if input_string is not a str, or esc is neither str nor None
    :raises ValueError: if esc is empty or longer than one character
    """
    # Input validation
    if not isinstance(input_string, str):
        raise TypeError(f'must be str, not {input_string.__class__.__name__}')
    str.split('', sep=sep, maxsplit=maxsplit)  # Use str.split to validate sep and maxsplit
    if esc is None:
        return input_string.split(
            sep=sep, maxsplit=maxsplit
        )  # Short circuit to default implementation if the escape character is None
    if not isinstance(esc, str):
        raise TypeError(f'must be str or None, not {esc.__class__.__name__}')
    if len(esc) == 0:
        raise ValueError('empty escape character')
    if len(esc) > 1:
        raise ValueError('escape must be a single character')

    # Set up a simple state machine keeping track of whether we have seen an escape character
    # ret: output substrings (last element is being built); i: scan position
    ret, esc_seen, i = [''], False, 0
    while i < len(input_string) and len(ret) - 1 != maxsplit:
        if not esc_seen:
            if input_string[i] == esc:
                # Consume the escape character and transition state
                esc_seen = True
                i += 1
            elif sep is None and input_string[i].isspace():
                # Consume as much whitespace as possible
                n = 1
                while i + n + 1 < len(input_string) and input_string[i + n : i + n + 1].isspace():
                    n += 1
                ret.append('')
                i += n
            elif sep is not None and input_string[i : i + len(sep)] == sep:
                # Consume the separator
                ret.append('')
                i += len(sep)
            else:
                # Otherwise just add the current char
                ret[-1] += input_string[i]
                i += 1
        else:
            # Add the current char and transition state back
            # (this also drops the escape char itself from the output)
            ret[-1] += input_string[i]
            esc_seen = False
            i += 1

    # Append any remaining string if we broke early because of maxsplit
    if i < len(input_string):
        ret[-1] += input_string[i:]

    # If splitting on whitespace, discard empty strings from result
    if sep is None:
        ret = [sub for sub in ret if len(sub) > 0]

    return ret
def get_ip_address(ipv6=False):
    """Get current IP address and netmask as a tuple.

    Scans all network interfaces that are up (excluding 'lo') and returns
    the (address, netmask) of the matching address family, or (None, None)
    when nothing matches.

    NOTE(review): there is no break out of the interface loop, so when
    several interfaces are up the LAST matching one wins — confirm intended.
    """
    wanted_family = socket.AF_INET6 if ipv6 else socket.AF_INET

    # Snapshot interface status and addresses via psutil
    if_stats = psutil.net_if_stats()
    if_addrs = psutil.net_if_addrs()

    found_address = None
    found_netmask = None
    for nic, nic_stat in if_stats.items():
        # Skip interfaces that are down and the loopback device
        if not nic_stat.isup or nic == 'lo':
            continue
        for nic_addr in if_addrs.get(nic, []):
            if nic_addr.family == wanted_family:
                found_address = nic_addr.address
                found_netmask = nic_addr.netmask
                break
    return found_address, found_netmask
def get_default_gateway(ipv6=False):
    """Get the default gateway IP address.

    Linux-specific: parses /proc/net/route (IPv4) or /proc/net/ipv6_route
    (IPv6) and returns the gateway of the default route as a string, or
    None when the table is missing, unreadable, or has no default route.
    """

    def hex_to_ipv4(raw):
        """Convert IPv4 hex (little-endian) to dotted notation."""
        return '.'.join(str(int(raw[pos : pos + 2], 16)) for pos in range(6, -1, -2))

    def hex_to_ipv6(raw):
        """Convert IPv6 hex to colon notation."""
        return ':'.join(raw[pos : pos + 4] for pos in range(0, 32, 4))

    # Select the route table layout for the requested address family
    if ipv6:
        route_file = '/proc/net/ipv6_route'
        default_dest = '00000000000000000000000000000000'
        dest_field, gateway_field, converter = 0, 4, hex_to_ipv6
    else:
        route_file = '/proc/net/route'
        default_dest = '00000000'
        dest_field, gateway_field, converter = 1, 2, hex_to_ipv4

    try:
        with open(route_file) as route_table:
            for route_line in route_table:
                columns = route_line.strip().split()
                if columns[dest_field] == default_dest:
                    return converter(columns[gateway_field])
    except (FileNotFoundError, IndexError, ValueError):
        # Missing table (non-Linux), short line, or malformed hex field
        return None
    return None

View File

@ -103,14 +103,14 @@ Examples of use:
"""
def __init__(self):
def __init__(self, args_begin_at=1):
"""Manage the command line arguments."""
self.init_glances()
self.init_glances(args_begin_at)
def init_glances(self):
def init_glances(self, args_begin_at):
"""Main method to init Glances."""
# Read the command line arguments or parse the one given in parameter (parser)
self.args = self.parse_args()
self.args = self.parse_args(args_begin_at)
# Load the configuration file, if it exists
# This function should be called after the parse_args
@ -152,10 +152,6 @@ Examples of use:
if not self.args.process_filter and not self.is_standalone():
logger.debug("Process filter is only available in standalone mode")
# Focus filter is only available in standalone mode
if not self.args.process_focus and not self.is_standalone():
logger.debug("Process focus is only available in standalone mode")
# Cursor option is only available in standalone mode
if not self.args.disable_cursor and not self.is_standalone():
logger.debug("Cursor is only available in standalone mode")
@ -341,12 +337,6 @@ Examples of use:
dest='disable_cursor',
help='disable cursor (process selection) in the UI',
)
parser.add_argument(
'--arrow-keys-sort', # See issue #3385
action='store_true',
default=False,
help='Use arrow keys to sort the process list instead of the SHIFT+key combinations',
)
# Sort processes list
parser.add_argument(
'--sort-processes',
@ -389,7 +379,7 @@ Examples of use:
default=None,
type=str,
dest='export_process_filter',
help='set the export process filter (comma-separated list of regular expression)',
help='set the export process filter (comman separated list of regular expression)',
)
# Client/Server option
parser.add_argument(
@ -506,14 +496,6 @@ Examples of use:
dest='process_filter',
help='set the process filter pattern (regular expression)',
)
# Process will focus on some process (comma-separated list of Glances filter)
parser.add_argument(
'--process-focus',
default=None,
type=str,
dest='process_focus',
help='set a process list to focus on (comma-separated list of Glances filter)',
)
parser.add_argument(
'--process-short-name',
action='store_true',
@ -568,18 +550,7 @@ Examples of use:
help='test memory leak (python 3.4 or higher needed)',
)
parser.add_argument(
'--api-doc',
default=None,
action='store_true',
dest='stdout_api_doc',
help='display Python API documentation',
)
parser.add_argument(
'--api-restful-doc',
default=None,
action='store_true',
dest='stdout_api_restful_doc',
help='display Restful API documentation',
'--api-doc', default=None, action='store_true', dest='stdout_apidoc', help='display fields descriptions'
)
if not WINDOWS:
parser.add_argument(
@ -611,13 +582,6 @@ Examples of use:
dest='diskio_iops',
help='show IO per second in the DiskIO plugin',
)
parser.add_argument(
'--diskio-latency',
action='store_true',
default=False,
dest='diskio_latency',
help='show IO latency in the DiskIO plugin',
)
parser.add_argument(
'--fahrenheit',
action='store_true',
@ -666,22 +630,6 @@ Examples of use:
default='',
help='strftime format string for displaying current date in standalone mode',
)
# Fetch
parser.add_argument(
'--fetch',
'--stdout-fetch',
action='store_true',
default=False,
dest='stdout_fetch',
help='display a (neo)fetch like summary and exit',
)
parser.add_argument(
'--fetch-template',
'--stdout-fetch-template',
dest='fetch_template',
default='',
help='overwrite default fetch template file',
)
return parser
@ -741,10 +689,7 @@ Examples of use:
args.network_cumul = False
# Processlist is updated in processcount
if getattr(args, 'disable_processcount', False):
logger.warning('Processcount is disable, so processlist (updated by processcount) is also disable')
disable(args, 'processlist')
elif getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
if getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
enable(args, 'processcount')
# Set a default export_process_filter (with all process) when using the stdout mode
@ -842,10 +787,6 @@ Examples of use:
disable(args, 'memswap')
disable(args, 'load')
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Memory leak
if getattr(args, 'memory_leak', False):
logger.info('Memory leak detection enabled')
@ -855,18 +796,15 @@ Examples of use:
args.time = 1
args.disable_history = True
# Disable history if history_size is 0
if self.config.has_section('global'):
if self.config.get_int_value('global', 'history_size', default=1200) == 0:
args.disable_history = True
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Display an information message if history is disabled
if args.disable_history:
logger.info("Stats history is disabled")
def parse_args(self):
"""Parse command line arguments."""
return self.init_args().parse_args(sys.argv[1:])
def parse_args(self, args_begin_at):
"""Parse command line arguments.
Glances args start at position args_begin_at.
"""
return self.init_args().parse_args(sys.argv[args_begin_at:])
def check_mode_compatibility(self):
"""Check mode compatibility"""

View File

@ -13,10 +13,11 @@ import os
import pickle
import threading
from datetime import datetime, timedelta
from ssl import CertificateError
from glances import __version__
from glances.config import user_cache_dir
from glances.globals import nativestr, safe_makedirs, urlopen
from glances.globals import HTTPError, URLError, nativestr, safe_makedirs, urlopen
from glances.logger import logger
try:
@ -154,7 +155,7 @@ class Outdated:
try:
res = urlopen(PYPI_API_URL, timeout=3).read()
except Exception as e:
except (HTTPError, URLError, CertificateError) as e:
logger.debug(f"Cannot get Glances version from the PyPI RESTful API ({e})")
else:
self.data['latest_version'] = json.loads(nativestr(res))['info']['version']

View File

@ -29,8 +29,8 @@ class Bar:
size,
bar_char='|',
empty_char=' ',
pre_char='',
post_char='',
pre_char='[',
post_char=']',
unit_char='%',
display_value=True,
min_value=0,
@ -42,8 +42,8 @@ class Bar:
size (_type_): Bar size
bar_char (str, optional): Bar character. Defaults to '|'.
empty_char (str, optional): Empty character. Defaults to ' '.
pre_char (str, optional): Display this char before the bar. Defaults to ''.
post_char (str, optional): Display this char after the bar. Defaults to ''.
pre_char (str, optional): Display this char before the bar. Defaults to '['.
post_char (str, optional): Display this char after the bar. Defaults to ']'.
unit_char (str, optional): Unit char to be displayed. Defaults to '%'.
display_value (bool, optional): Do i need to display the value. Defaults to True.
min_value (int, optional): Minimum value. Defaults to 0.
@ -68,10 +68,12 @@ class Bar:
@property
def size(self, with_decoration=False):
# Return the bar size
# Return the bar size, with or without decoration
if with_decoration:
return self.__size
if self.__display_value:
return self.__size - 6
return self.__size
return None
@property
def percent(self):
@ -103,9 +105,6 @@ class Bar:
whole += 1
ret += self.__empty_char * int(self.size - whole)
# Add the post and pre chars
ret = f'{self.__pre_char}{ret}{self.__post_char}'
# Add the value
if self.__display_value:
if self.percent >= self.max_value:

View File

@ -51,7 +51,7 @@ class _GlancesCurses:
'a': {'sort_key': 'auto'},
'A': {'switch': 'disable_amps'},
'b': {'switch': 'byte'},
'B': {'handler': '_handle_diskio_iops'},
'B': {'switch': 'diskio_iops'},
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
@ -69,7 +69,6 @@ class _GlancesCurses:
# 'k' > Kill selected process
'K': {'switch': 'disable_connections'},
'l': {'switch': 'disable_alert'},
'L': {'handler': '_handle_diskio_latency'},
'm': {'sort_key': 'memory_percent'},
'M': {'switch': 'reset_minmax_tag'},
'n': {'switch': 'disable_network'},
@ -93,10 +92,8 @@ class _GlancesCurses:
'z': {'handler': '_handle_disable_process'},
'+': {'handler': '_handle_increase_nice'},
'-': {'handler': '_handle_decrease_nice'},
# "<" (shift + left arrow) navigation through process sort
# ">" (shift + right arrow) navigation through process sort
# "<" (left arrow) scroll through process name
# ">" (right arrow) scroll through process name
# "<" (left arrow) navigation through process sort
# ">" (right arrow) navigation through process sort
# 'UP' > Up in the server list
# 'DOWN' > Down in the server list
}
@ -110,7 +107,7 @@ class _GlancesCurses:
# Define left sidebar
# This variable is used in the make webui task in order to generate the
# glances/outputs/static/js/uiconfig.json file for the web interface
# This list can also be overwritten by the configuration file ([outputs] left_menu option)
# This lidt can also be overwritten by the configuration file ([outputs] left_menu option)
_left_sidebar = [
'network',
'ports',
@ -188,8 +185,6 @@ class _GlancesCurses:
# Init Glances cursor
self.args.cursor_position = 0
self.args.cursor_process_name_position = 0
# For the moment cursor only available in standalone mode
self.args.disable_cursor = not self.args.is_standalone
@ -198,9 +193,6 @@ class _GlancesCurses:
self.term_window.nodelay(1)
self.pressedkey = -1
# Is this the end ?
self.is_end = False
# History tag
self._init_history()
@ -255,6 +247,7 @@ class _GlancesCurses:
pass
def get_key(self, window):
# TODO: Check issue #163
return window.getch()
def catch_actions_from_hotkey(self, hotkey):
@ -270,14 +263,8 @@ class _GlancesCurses:
{
self.pressedkey in {ord('e')} and not self.args.programs: self._handle_process_extended,
self.pressedkey in {ord('k')} and not self.args.disable_cursor: self._handle_kill_process,
self.pressedkey
in {curses.KEY_LEFT if self.args.arrow_keys_sort else curses.KEY_SLEFT}: self._handle_sort_left,
self.pressedkey
in {curses.KEY_RIGHT if self.args.arrow_keys_sort else curses.KEY_SRIGHT}: self._handle_sort_right,
self.pressedkey
in {curses.KEY_SLEFT if self.args.arrow_keys_sort else curses.KEY_LEFT}: self._handle_process_name_left,
self.pressedkey
in {curses.KEY_SRIGHT if self.args.arrow_keys_sort else curses.KEY_RIGHT}: self._handle_process_name_right,
self.pressedkey in {curses.KEY_LEFT}: self._handle_sort_left,
self.pressedkey in {curses.KEY_RIGHT}: self._handle_sort_right,
self.pressedkey in {curses.KEY_UP, 65} and not self.args.disable_cursor: self._handle_cursor_up,
self.pressedkey in {curses.KEY_DOWN, 66} and not self.args.disable_cursor: self._handle_cursor_down,
self.pressedkey in {curses.KEY_F5, 18}: self._handle_refresh,
@ -363,13 +350,6 @@ class _GlancesCurses:
def _handle_kill_process(self):
self.kill_process = not self.kill_process
def _handle_process_name_left(self):
if self.args.cursor_process_name_position > 0:
self.args.cursor_process_name_position -= 1
def _handle_process_name_right(self):
self.args.cursor_process_name_position += 1
def _handle_clean_logs(self):
glances_events.clean()
@ -383,18 +363,6 @@ class _GlancesCurses:
else:
glances_processes.enable()
def _handle_diskio_iops(self):
"""Switch between bytes/s and IOPS for Disk IO."""
self.args.diskio_iops = not self.args.diskio_iops
if self.args.diskio_iops:
self.args.diskio_latency = False
def _handle_diskio_latency(self):
"""Switch between bytes/s and latency for Disk IO."""
self.args.diskio_latency = not self.args.diskio_latency
if self.args.diskio_latency:
self.args.diskio_iops = False
def _handle_sort_left(self):
next_sort = (self.loop_position() - 1) % len(self._sort_loop)
glances_processes.set_sort_key(self._sort_loop[next_sort], False)
@ -416,10 +384,6 @@ class _GlancesCurses:
logger.info("Stop Glances client and return to the browser")
else:
logger.info(f"Stop Glances (keypressed: {self.pressedkey})")
# End the curses window
self.end()
# Exit the program
sys.exit(0)
def _handle_refresh(self):
glances_processes.reset_internal_cache()
@ -466,7 +430,6 @@ class _GlancesCurses:
curses.endwin()
except Exception:
pass
self.is_end = True
def init_line_column(self):
"""Init the line and column position for the curses interface."""
@ -1165,11 +1128,6 @@ class _GlancesCurses:
while not countdown.finished() and not isexitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
if pressedkey == -1:
self.wait()
continue
isexitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
if pressedkey == curses.KEY_F5 or self.pressedkey == 18:
@ -1177,7 +1135,7 @@ class _GlancesCurses:
self.clear()
return isexitkey
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66, curses.KEY_LEFT, 68, curses.KEY_RIGHT, 67):
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66):
# Up of won key pressed, reset the countdown
# Better for user experience
countdown.reset()

View File

@ -10,7 +10,6 @@
import curses
import math
import sys
from glances.logger import logger
from glances.outputs.glances_curses import _GlancesCurses
@ -50,6 +49,7 @@ class GlancesCursesBrowser(_GlancesCurses):
self._page_max = 0
self._page_max_lines = 0
self.is_end = False
self._revesed_sorting = False
self._stats_list = None
@ -87,7 +87,7 @@ class GlancesCursesBrowser(_GlancesCurses):
counts[color] = counts.get(color, 0) + 1
result = ''
for key in counts:
for key in counts.keys():
result += key + ': ' + str(counts[key]) + ' '
return result
@ -157,7 +157,8 @@ class GlancesCursesBrowser(_GlancesCurses):
# 'ESC'|'q' > Quit
self.end()
logger.info("Stop Glances client browser")
sys.exit(0)
# sys.exit(0)
self.is_end = True
elif self.pressedkey == 10:
# 'ENTER' > Run Glances on the selected server
self.active_server = self._current_page * self._page_max_lines + self.cursor_position
@ -326,15 +327,10 @@ class GlancesCursesBrowser(_GlancesCurses):
y += 1
# Second line (for item/key)
for k, v in column_def.items():
if xc >= screen_x or y >= screen_y or v is None:
continue
k_split = k.split('_')
if len(k_split) == 1:
header_str = k_split[0]
else:
header_str = ' '.join(k_split[1:])
self.term_window.addnstr(y, xc, header_str.upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
if xc < screen_x and y < screen_y and v is not None:
self.term_window.addnstr(y, xc, ' '.join(k_split[1:]).upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
y += 1
# If a servers has been deleted from the list...

View File

@ -6,21 +6,21 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""RestFul API interface class."""
"""RestFull API interface class."""
import os
import socket
import sys
import tempfile
import webbrowser
from typing import Annotated, Any
from typing import Annotated, Any, Union
from urllib.parse import urljoin
from glances import __apiversion__, __version__
from glances.events_list import glances_events
from glances.globals import json_dumps
from glances.globals import json_dumps, weak_lru_cache
from glances.logger import logger
from glances.password import GlancesPassword
from glances.plugins.plugin.dag import get_plugin_dependencies
from glances.processes import glances_processes
from glances.servers_list import GlancesServersList
from glances.servers_list_dynamic import GlancesAutoDiscoverClient
@ -46,6 +46,7 @@ try:
except ImportError:
logger.critical('Uvicorn import error. Glances cannot start in web server mode.')
sys.exit(2)
import builtins
import contextlib
import threading
import time
@ -119,7 +120,7 @@ class GlancesRestfulApi:
self.load_config(config)
# Set the bind URL
self.bind_url = urljoin(f'{self.protocol}://{self.args.bind_address}:{self.args.port}/', self.url_prefix)
self.bind_url = urljoin(f'http://{self.args.bind_address}:{self.args.port}/', self.url_prefix)
# FastAPI Init
if self.args.password:
@ -181,23 +182,11 @@ class GlancesRestfulApi:
if self.url_prefix != '':
self.url_prefix = self.url_prefix.rstrip('/')
logger.debug(f'URL prefix: {self.url_prefix}')
# SSL
self.ssl_keyfile = config.get_value('outputs', 'ssl_keyfile', default=None)
self.ssl_keyfile_password = config.get_value('outputs', 'ssl_keyfile_password', default=None)
self.ssl_certfile = config.get_value('outputs', 'ssl_certfile', default=None)
self.protocol = 'https' if self.is_ssl() else 'http'
logger.debug(f"Protocol for Resful API and WebUI: {self.protocol}")
def is_ssl(self):
"""Return true if the Glances server use SSL."""
return self.ssl_keyfile is not None and self.ssl_certfile is not None
def __update_stats(self, plugins_list_to_update=None):
def __update_stats(self):
# Never update more than 1 time per cached_time
# Also update if specific plugins are requested
# In this case, lru_cache will handle the stat's update frequency
if self.timer.finished() or plugins_list_to_update:
self.stats.update(plugins_list_to_update=plugins_list_to_update)
if self.timer.finished():
self.stats.update()
self.timer = Timer(self.args.cached_time)
def __update_servers_list(self):
@ -218,16 +207,6 @@ class GlancesRestfulApi:
status.HTTP_401_UNAUTHORIZED, "Incorrect username or password", {"WWW-Authenticate": "Basic"}
)
def _logo(self):
return rf"""
_____ _
/ ____| |
| | __| | __ _ _ __ ___ ___ ___
| | |_ | |/ _` | '_ \ / __/ _ \/ __|
| |__| | | (_| | | | | (_| __/\__
\_____|_|\__,_|_| |_|\___\___||___/ {__version__}
"""
def _router(self) -> APIRouter:
"""Define a custom router for Glances path."""
base_path = f'/api/{self.API_VERSION}'
@ -287,9 +266,6 @@ class GlancesRestfulApi:
for path, endpoint in route_mapping.items():
router.add_api_route(path, endpoint)
# Logo
print(self._logo())
# Browser WEBUI
if hasattr(self.args, 'browser') and self.args.browser:
# Template for the root browser.html file
@ -346,12 +322,7 @@ class GlancesRestfulApi:
def _start_uvicorn(self):
# Run the Uvicorn Web server
uvicorn_config = uvicorn.Config(
self._app,
host=self.args.bind_address,
port=self.args.port,
access_log=self.args.debug,
ssl_keyfile=self.ssl_keyfile,
ssl_certfile=self.ssl_certfile,
self._app, host=self.args.bind_address, port=self.args.port, access_log=self.args.debug
)
try:
self.uvicorn_server = GlancesUvicornServer(config=uvicorn_config)
@ -465,8 +436,7 @@ class GlancesRestfulApi:
HTTP/1.1 404 Not Found
"""
# Update the stat
# TODO: Why ??? Try to comment it
# self.__update_stats()
self.__update_stats()
try:
plist = self.plugins_list
@ -486,7 +456,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(self.servers_list.get_servers_list() if self.servers_list else [])
# Comment this solve an issue on Home Assistant See #3238
@weak_lru_cache(maxsize=1, ttl=1)
def _api_all(self):
"""Glances API RESTful implementation.
@ -495,6 +465,13 @@ class GlancesRestfulApi:
HTTP/400 if plugin is not found
HTTP/404 if others error
"""
if self.args.debug:
fname = os.path.join(tempfile.gettempdir(), 'glances-debug.json')
try:
with builtins.open(fname) as f:
return f.read()
except OSError:
logger.debug(f"Debug file ({fname}) not found")
# Update the stat
self.__update_stats()
@ -508,6 +485,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(statval)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_all_limits(self):
"""Glances API RESTful implementation.
@ -524,6 +502,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(limits)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_all_views(self):
"""Glances API RESTful implementation.
@ -540,6 +519,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(limits)
@weak_lru_cache(maxsize=1, ttl=1)
def _api(self, plugin: str):
"""Glances API RESTful implementation.
@ -551,11 +531,11 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat ID
statval = self.stats.get_plugin(plugin).get_api()
statval = self.stats.get_plugin(plugin).get_raw()
except Exception as e:
raise HTTPException(status.HTTP_404_NOT_FOUND, f"Cannot get plugin {plugin} ({str(e)})")
@ -569,6 +549,7 @@ class GlancesRestfulApi:
status.HTTP_400_BAD_REQUEST, f"Unknown plugin {plugin} (available plugins: {self.plugins_list})"
)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_top(self, plugin: str, nb: int = 0):
"""Glances API RESTful implementation.
@ -582,11 +563,12 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat ID
statval = self.stats.get_plugin(plugin).get_api()
# TODO in #3211: use get_export instead but break API
statval = self.stats.get_plugin(plugin).get_raw()
except Exception as e:
raise HTTPException(status.HTTP_404_NOT_FOUND, f"Cannot get plugin {plugin} ({str(e)})")
@ -595,6 +577,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(statval)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_history(self, plugin: str, nb: int = 0):
"""Glances API RESTful implementation.
@ -607,7 +590,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat ID
@ -617,6 +600,7 @@ class GlancesRestfulApi:
return statval
@weak_lru_cache(maxsize=1, ttl=1)
def _api_limits(self, plugin: str):
"""Glances API RESTful implementation.
@ -635,6 +619,7 @@ class GlancesRestfulApi:
return GlancesJSONResponse(ret)
@weak_lru_cache(maxsize=1, ttl=1)
def _api_views(self, plugin: str):
"""Glances API RESTful implementation.
@ -667,7 +652,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat views
@ -692,7 +677,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat views
@ -717,7 +702,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat views
@ -741,7 +726,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat views
@ -766,7 +751,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value of the stat history
@ -814,7 +799,7 @@ class GlancesRestfulApi:
else:
return GlancesJSONResponse(ret)
def _api_value(self, plugin: str, item: str, value: str | int | float):
def _api_value(self, plugin: str, item: str, value: Union[str, int, float]):
"""Glances API RESTful implementation.
Return the process stats (dict) for the given item=value
@ -825,7 +810,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats(get_plugin_dependencies(plugin))
self.__update_stats()
try:
# Get the RAW value
@ -997,6 +982,3 @@ class GlancesRestfulApi:
raise HTTPException(status.HTTP_404_NOT_FOUND, f"Unknown PID process {pid}")
return GlancesJSONResponse(process_stats)
# End of GlancesRestfulApi class

View File

@ -1,286 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Generate Glances Python API documentation."""
from pprint import pformat
from glances import api
APIDOC_HEADER = """\
.. _api:
Python API documentation
========================
This documentation describes the Glances Python API.
Note: This API is only available in Glances 4.4.0 or higher.
"""
def printtab(s, indent=' '):
print(indent + s.replace('\n', '\n' + indent))
def print_tldr(gl):
"""Print the TL;DR section of the API documentation."""
sub_title = 'TL;DR'
print(sub_title)
print('-' * len(sub_title))
print('')
print('You can access the Glances API by importing the `glances.api` module and creating an')
print('instance of the `GlancesAPI` class. This instance provides access to all Glances plugins')
print('and their fields. For example, to access the CPU plugin and its total field, you can')
print('use the following code:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
printtab('>>> gl.cpu')
printtab(f'{pformat(gl.cpu.stats)}')
printtab('>>> gl.cpu.get("total")')
printtab(f'{gl.cpu.get("total")}')
printtab('>>> gl.mem.get("used")')
printtab(f'{gl.mem.get("used")}')
printtab('>>> gl.auto_unit(gl.mem.get("used"))')
printtab(f'{gl.auto_unit(gl.mem.get("used"))}')
print('')
print('If the stats return a list of items (like network interfaces or processes), you can')
print('access them by their name:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.network.keys()')
printtab(f'{gl.network.keys()}')
printtab(f'>>> gl.network["{gl.network.keys()[0]}"]')
printtab(f'{pformat(gl.network[gl.network.keys()[0]])}')
print('')
def print_init_api(gl):
sub_title = 'Init Glances Python API'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Init the Glances API:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
print('')
def print_plugins_list(gl):
sub_title = 'Get Glances plugins list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Get the plugins list:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.plugins()')
printtab(f'{gl.plugins()}')
print('')
def print_plugin(gl, plugin):
"""Print the details of a single plugin."""
sub_title = f'Glances {plugin}'
print(sub_title)
print('-' * len(sub_title))
print('')
stats_obj = gl.__getattr__(plugin)
print(f'{plugin.capitalize()} stats:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> type(gl.{plugin})')
printtab(f'{type(stats_obj)}')
if len(stats_obj.keys()) > 0 and isinstance(stats_obj[stats_obj.keys()[0]], dict):
printtab(f'>>> gl.{plugin}')
printtab(f'Return a dict of dict with key=<{stats_obj[stats_obj.keys()[0]]["key"]}>')
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}.get("{stats_obj.keys()[0]}")')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
else:
printtab(f'>>> gl.{plugin}')
printtab(f'{pformat(stats_obj.stats)}')
if len(stats_obj.keys()) > 0:
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}.get("{stats_obj.keys()[0]}")')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
print('')
if stats_obj.fields_description is not None:
print(f'{plugin.capitalize()} fields description:')
print('')
for field, description in stats_obj.fields_description.items():
print(f'* {field}: {description["description"]}')
print('')
print(f'{plugin.capitalize()} limits:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> gl.{plugin}.limits')
printtab(f'{pformat(gl.__getattr__(plugin).limits)}')
print('')
def print_plugins(gl):
"""Print the details of all plugins."""
for plugin in [p for p in gl.plugins() if p not in ['help', 'programlist']]:
print_plugin(gl, plugin)
def print_auto_unit(gl):
sub_title = 'Use auto_unit to display a human-readable string with the unit'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use auto_unit() function to generate a human-readable string with the unit:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.mem.get("used")')
printtab(f'{gl.mem.get("used")}')
print('')
printtab('>>> gl.auto_unit(gl.mem.get("used"))')
printtab(f'{gl.auto_unit(gl.mem.get("used"))}')
print('')
print("""
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
""")
def print_bar(gl):
sub_title = 'Use to display stat as a bar'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use bar() function to generate a bar:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.bar(gl.mem["percent"])')
printtab(f'{gl.bar(gl.mem.get("percent"))}')
print('')
print("""
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
""")
def print_top_process(gl):
sub_title = 'Use to display top process list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use top_process() function to generate a list of top processes sorted by CPU or MEM usage:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.top_process()')
printtab(f'{gl.top_process()}')
print('')
print("""
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
""")
class GlancesStdoutApiDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):
# Init
self.gl = api.GlancesAPI()
def end(self):
pass
def update(self, stats, duration=1):
"""Display issue"""
# Display header
print(APIDOC_HEADER)
# Display TL;DR section
print_tldr(self.gl)
# Init the API
print_init_api(self.gl)
# Display plugins list
print_plugins_list(self.gl)
# Loop over plugins
print_plugins(self.gl)
# Others helpers
print_auto_unit(self.gl)
print_bar(self.gl)
print_top_process(self.gl)
# Return True to exit directly (no refresh)
return True

View File

@ -6,32 +6,33 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Generate Glances Restful API documentation."""
"""Fields description interface class."""
import json
import time
from pprint import pformat
from glances import __apiversion__
from glances.globals import iteritems
from glances.logger import logger
API_URL = f"http://localhost:61208/api/{__apiversion__}"
APIDOC_HEADER = f"""\
.. _api_restful:
.. _api:
Restful/JSON API documentation
==============================
API (Restfull/JSON) documentation
=================================
This documentation describes the Glances API version {__apiversion__} (Restful/JSON) interface.
This documentation describes the Glances API version {__apiversion__} (Restfull/JSON) interface.
An OpenAPI specification file is available at:
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/api/openapi.json``
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/openapi.json``
Run the Glances API server
--------------------------
The Glances Restful/API server could be ran using the following command line:
The Glances Restfull/API server could be ran using the following command line:
.. code-block:: bash
@ -135,7 +136,7 @@ def print_plugin_description(plugin, stat):
print('Fields descriptions:')
print('')
time_since_update = False
for field, description in stat.fields_description.items():
for field, description in iteritems(stat.fields_description):
print(
'* **{}**: {} (unit is *{}*)'.format(
field,
@ -353,7 +354,7 @@ def print_plugin_post_events():
print('')
class GlancesStdoutApiRestfulDoc:
class GlancesStdoutApiDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):

View File

@ -55,12 +55,12 @@ class GlancesStdoutCsv:
line += f'{plugin}.{attribute}{self.separator}'
else:
if isinstance(stat, dict):
for k in stat:
for k in stat.keys():
line += f'{plugin}.{str(k)}{self.separator}'
elif isinstance(stat, list):
for i in stat:
if isinstance(i, dict) and 'key' in i:
for k in i:
for k in i.keys():
line += '{}.{}.{}{}'.format(plugin, str(i[i['key']]), str(k), self.separator)
else:
line += f'{plugin}{self.separator}'

View File

@ -1,87 +0,0 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Fetch mode interface class."""
import jinja2
from glances import api
from glances.logger import logger
DEFAULT_FETCH_TEMPLATE = """
{{ gl.system['hostname'] }}{{ ' | ' + gl.ip['address'] if gl.ip['address'] else '' }} | Uptime: {{ gl.uptime }}
{{ gl.system['hr_name'] }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }}/min1 |\
{{ '%0.2f'| format(gl.load['min5']) }}/min5 |\
{{ '%0.2f'| format(gl.load['min15']) }}/min15
CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} /\
{{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}\
💾 {% if loop.index == 1 %}DISK{% else %} {% endif %}\
{{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} /\
{{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}\
{% for net in gl.network.keys() %}\
📡 {% if loop.index == 1 %}NET{% else %} {% endif %}\
{{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s\
{{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}\
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
{{ process['cpu_percent'] }}% CPU\
{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}\
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM\
{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }}\
{{ process['cpu_percent'] }}% CPU
{% endfor %}\
"""
class GlancesStdoutFetch:
"""This class manages the Stdout JSON display."""
def __init__(self, config=None, args=None):
# Init
self.config = config
self.args = args
self.gl = api.GlancesAPI(self.config, self.args)
def end(self):
pass
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display fetch from the template file to stdout."""
if self.args.fetch_template == "":
fetch_template = DEFAULT_FETCH_TEMPLATE
else:
logger.info("Using fetch template file: " + self.args.fetch_template)
# Load the template from the file given in the self.args.fetch_template argument
with open(self.args.fetch_template) as f:
fetch_template = f.read()
# Create a Jinja2 environment
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), autoescape=True)
template = jinja_env.from_string(fetch_template)
output = template.render(gl=self.gl)
print(output)
# Return True to exit directly (no refresh)
return True

View File

@ -98,7 +98,7 @@ class GlancesStdoutIssue:
stat = stats.get_plugin(plugin).get_export()
# Hide private information
if plugin == 'ip':
for key in stat:
for key in stat.keys():
stat[key] = '***'
except Exception as e:
stat_error = e

View File

@ -17,7 +17,6 @@ _unicode_message = {
'PROCESS_SELECTOR': ['>', '>'],
'MEDIUM_LINE': ['\u2500', ''],
'LOW_LINE': ['\u2581', '_'],
'THREE_DOTS': ['\u2026', '...'],
}

View File

@ -1,10 +1,10 @@
module.exports = {
printWidth: 100,
arrowParens: "always",
bracketSpacing: true,
semi: true,
singleQuote: true,
tabWidth: 4,
trailingComma: "none",
useTabs: false,
printWidth: 100,
arrowParens: 'always',
bracketSpacing: true,
semi: true,
singleQuote: true,
tabWidth: 4,
trailingComma: 'none',
useTabs: false
};

View File

@ -33,7 +33,6 @@ hash -r
You must run the following command from the `glances/outputs/static/` directory.
```bash
.venv/bin/python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
cd glances/outputs/static/
```

View File

@ -169,10 +169,10 @@ body {
}
.button {
color: #99CCFF;
background: rgba(0, 0, 0, 0.4);
border: 1px solid #99CCFF;
padding: 1px 5px;
color: #99CCFF; /* Bleu clair high-tech */
background: rgba(0, 0, 0, 0.4); /* Fond légèrement transparent */
border: 1px solid #99CCFF; /* Bordure discrète */
padding: 5px 10px;
border-radius: 5px;
letter-spacing: 1px;
cursor: pointer;
@ -182,14 +182,14 @@ body {
}
.button:hover {
background: rgba(183, 214, 255, 0.30);
background: rgba(153, 204, 255, 0.15); /* Légère coloration au survol */
border-color: #B0D0FF;
color: #B0D0FF;
}
.button:active {
transform: scale(0.95);
box-shadow: 0 0 8px rgba(153, 204, 255, 0.5);
transform: scale(0.95); /* Légère réduction pour effet de pression */
box-shadow: 0 0 8px rgba(153, 204, 255, 0.5); /* Flash léger */
}
.frequency {
@ -413,8 +413,6 @@ body {
#processlist {
overflow-y: auto;
height: 600px;
margin-top: 1em;
.table {
margin-bottom: 1em;
}

View File

@ -1,29 +1,29 @@
import eslint from "@eslint/js";
import eslintConfigPrettier from "eslint-config-prettier";
import eslintPluginVue from "eslint-plugin-vue";
import globals from "globals";
import typescriptEslint from "typescript-eslint";
import eslint from '@eslint/js';
import eslintConfigPrettier from 'eslint-config-prettier';
import eslintPluginVue from 'eslint-plugin-vue';
import globals from 'globals';
import typescriptEslint from 'typescript-eslint';
export default typescriptEslint.config(
{ ignores: ["*.d.ts", "**/coverage", "**/dist"] },
{
extends: [
eslint.configs.recommended,
...typescriptEslint.configs.recommended,
...eslintPluginVue.configs["flat/recommended"],
],
files: ["**/*.{ts,vue}"],
languageOptions: {
ecmaVersion: "latest",
sourceType: "module",
globals: globals.browser,
parserOptions: {
parser: typescriptEslint.parser,
},
},
rules: {
// your rules
},
},
eslintConfigPrettier,
{ ignores: ['*.d.ts', '**/coverage', '**/dist'] },
{
extends: [
eslint.configs.recommended,
...typescriptEslint.configs.recommended,
...eslintPluginVue.configs['flat/recommended'],
],
files: ['**/*.{ts,vue}'],
languageOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
globals: globals.browser,
parserOptions: {
parser: typescriptEslint.parser,
},
},
rules: {
// your rules
},
},
eslintConfigPrettier
);

View File

@ -19,10 +19,10 @@
<div v-if="!args.disable_ip" class="d-none d-lg-block"><glances-plugin-ip
:data="data"></glances-plugin-ip>
</div>
<div v-if="!args.disable_uptime" class="d-none d-md-block"><glances-plugin-uptime
:data="data"></glances-plugin-uptime></div>
<div v-if="!args.disable_now" class="d-none d-xl-block"><glances-plugin-now
:data="data"></glances-plugin-now></div>
<div v-if="!args.disable_uptime" class="d-none d-md-block"><glances-plugin-uptime
:data="data"></glances-plugin-uptime></div>
</div>
</div>
<div class="d-flex d-none d-sm-block">
@ -90,335 +90,313 @@
</template>
<script>
import hotkeys from "hotkeys-js";
import GlancesHelp from "./components/help.vue";
import GlancesPluginAlert from "./components/plugin-alert.vue";
import GlancesPluginCloud from "./components/plugin-cloud.vue";
import GlancesPluginConnections from "./components/plugin-connections.vue";
import GlancesPluginContainers from "./components/plugin-containers.vue";
import GlancesPluginCpu from "./components/plugin-cpu.vue";
import GlancesPluginDiskio from "./components/plugin-diskio.vue";
import GlancesPluginFolders from "./components/plugin-folders.vue";
import GlancesPluginFs from "./components/plugin-fs.vue";
import GlancesPluginGpu from "./components/plugin-gpu.vue";
import GlancesPluginHostname from "./components/plugin-hostname.vue";
import GlancesPluginIp from "./components/plugin-ip.vue";
import GlancesPluginIrq from "./components/plugin-irq.vue";
import GlancesPluginLoad from "./components/plugin-load.vue";
import GlancesPluginMem from "./components/plugin-mem.vue";
import GlancesPluginMemswap from "./components/plugin-memswap.vue";
import GlancesPluginNetwork from "./components/plugin-network.vue";
import GlancesPluginNow from "./components/plugin-now.vue";
import GlancesPluginPercpu from "./components/plugin-percpu.vue";
import GlancesPluginPorts from "./components/plugin-ports.vue";
import GlancesPluginProcess from "./components/plugin-process.vue";
import GlancesPluginQuicklook from "./components/plugin-quicklook.vue";
import GlancesPluginRaid from "./components/plugin-raid.vue";
import GlancesPluginSensors from "./components/plugin-sensors.vue";
import GlancesPluginSmart from "./components/plugin-smart.vue";
import GlancesPluginSystem from "./components/plugin-system.vue";
import GlancesPluginUptime from "./components/plugin-uptime.vue";
import GlancesPluginVms from "./components/plugin-vms.vue";
import GlancesPluginWifi from "./components/plugin-wifi.vue";
import { GlancesStats } from "./services.js";
import { store } from "./store.js";
import hotkeys from 'hotkeys-js';
import { GlancesStats } from './services.js';
import { store } from './store.js';
import uiconfig from "./uiconfig.json";
import GlancesHelp from './components/help.vue';
import GlancesPluginAlert from './components/plugin-alert.vue';
import GlancesPluginCloud from './components/plugin-cloud.vue';
import GlancesPluginConnections from './components/plugin-connections.vue';
import GlancesPluginCpu from './components/plugin-cpu.vue';
import GlancesPluginDiskio from './components/plugin-diskio.vue';
import GlancesPluginContainers from './components/plugin-containers.vue';
import GlancesPluginFolders from './components/plugin-folders.vue';
import GlancesPluginFs from './components/plugin-fs.vue';
import GlancesPluginGpu from './components/plugin-gpu.vue';
import GlancesPluginHostname from './components/plugin-hostname.vue';
import GlancesPluginIp from './components/plugin-ip.vue';
import GlancesPluginIrq from './components/plugin-irq.vue';
import GlancesPluginLoad from './components/plugin-load.vue';
import GlancesPluginMem from './components/plugin-mem.vue';
import GlancesPluginMemswap from './components/plugin-memswap.vue';
import GlancesPluginNetwork from './components/plugin-network.vue';
import GlancesPluginNow from './components/plugin-now.vue';
import GlancesPluginPercpu from './components/plugin-percpu.vue';
import GlancesPluginPorts from './components/plugin-ports.vue';
import GlancesPluginProcess from './components/plugin-process.vue';
import GlancesPluginQuicklook from './components/plugin-quicklook.vue';
import GlancesPluginRaid from './components/plugin-raid.vue';
import GlancesPluginSmart from './components/plugin-smart.vue';
import GlancesPluginSensors from './components/plugin-sensors.vue';
import GlancesPluginSystem from './components/plugin-system.vue';
import GlancesPluginUptime from './components/plugin-uptime.vue';
import GlancesPluginVms from './components/plugin-vms.vue';
import GlancesPluginWifi from './components/plugin-wifi.vue';
import uiconfig from './uiconfig.json';
export default {
components: {
GlancesHelp,
GlancesPluginAlert,
GlancesPluginCloud,
GlancesPluginConnections,
GlancesPluginCpu,
GlancesPluginDiskio,
GlancesPluginContainers,
GlancesPluginFolders,
GlancesPluginFs,
GlancesPluginGpu,
GlancesPluginHostname,
GlancesPluginIp,
GlancesPluginIrq,
GlancesPluginLoad,
GlancesPluginMem,
GlancesPluginMemswap,
GlancesPluginNetwork,
GlancesPluginNow,
GlancesPluginPercpu,
GlancesPluginPorts,
GlancesPluginProcess,
GlancesPluginQuicklook,
GlancesPluginRaid,
GlancesPluginSensors,
GlancesPluginSmart,
GlancesPluginSystem,
GlancesPluginUptime,
GlancesPluginVms,
GlancesPluginWifi,
},
data() {
return {
store,
};
},
computed: {
args() {
return this.store.args || {};
},
config() {
return this.store.config || {};
},
data() {
return this.store.data || {};
},
dataLoaded() {
return this.store.data !== undefined;
},
hasGpu() {
return this.store.data.stats.gpu.length > 0;
},
isLinux() {
return this.store.data.isLinux;
},
title() {
const { data } = this;
const title =
(data.stats && data.stats.system && data.stats.system.hostname) || "";
return title ? `${title} - Glances` : "Glances";
},
topMenu() {
return this.config.outputs !== undefined &&
this.config.outputs.top_menu !== undefined
? this.config.outputs.top_menu.split(",")
: uiconfig.topMenu;
},
leftMenu() {
return this.config.outputs !== undefined &&
this.config.outputs.left_menu !== undefined
? this.config.outputs.left_menu.split(",")
: uiconfig.leftMenu;
},
},
watch: {
title() {
if (document) {
document.title = this.title;
}
},
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES["refresh-time"])
? parseInt(GLANCES["refresh-time"], 10)
: undefined;
GlancesStats.init(refreshTime);
this.setupHotKeys();
},
beforeUnmount() {
hotkeys.unbind();
},
methods: {
setupHotKeys() {
// a => Sort processes/containers automatically
hotkeys("a", () => {
this.store.args.sort_processes_key = null;
});
components: {
GlancesHelp,
GlancesPluginAlert,
GlancesPluginCloud,
GlancesPluginConnections,
GlancesPluginCpu,
GlancesPluginDiskio,
GlancesPluginContainers,
GlancesPluginFolders,
GlancesPluginFs,
GlancesPluginGpu,
GlancesPluginHostname,
GlancesPluginIp,
GlancesPluginIrq,
GlancesPluginLoad,
GlancesPluginMem,
GlancesPluginMemswap,
GlancesPluginNetwork,
GlancesPluginNow,
GlancesPluginPercpu,
GlancesPluginPorts,
GlancesPluginProcess,
GlancesPluginQuicklook,
GlancesPluginRaid,
GlancesPluginSensors,
GlancesPluginSmart,
GlancesPluginSystem,
GlancesPluginUptime,
GlancesPluginVms,
GlancesPluginWifi
},
data() {
return {
store
};
},
computed: {
args() {
return this.store.args || {};
},
config() {
return this.store.config || {};
},
data() {
return this.store.data || {};
},
dataLoaded() {
return this.store.data !== undefined;
},
hasGpu() {
return this.store.data.stats.gpu.length > 0;
},
isLinux() {
return this.store.data.isLinux;
},
title() {
const { data } = this;
const title = (data.stats && data.stats.system && data.stats.system.hostname) || '';
return title ? `${title} - Glances` : 'Glances';
},
leftMenu() {
return this.config.outputs !== undefined && this.config.outputs.left_menu !== undefined
? this.config.outputs.left_menu.split(',')
: uiconfig.leftMenu;
}
},
watch: {
title() {
if (document) {
document.title = this.title;
}
}
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES['refresh-time'])
? parseInt(GLANCES['refresh-time'], 10)
: undefined;
GlancesStats.init(refreshTime);
this.setupHotKeys();
},
beforeUnmount() {
hotkeys.unbind();
},
methods: {
setupHotKeys() {
// a => Sort processes/containers automatically
hotkeys('a', () => {
this.store.args.sort_processes_key = null;
});
// c => Sort processes/containers by CPU%
hotkeys("c", () => {
this.store.args.sort_processes_key = "cpu_percent";
});
// c => Sort processes/containers by CPU%
hotkeys('c', () => {
this.store.args.sort_processes_key = 'cpu_percent';
});
// m => Sort processes/containers by MEM%
hotkeys("m", () => {
this.store.args.sort_processes_key = "memory_percent";
});
// m => Sort processes/containers by MEM%
hotkeys('m', () => {
this.store.args.sort_processes_key = 'memory_percent';
});
// u => Sort processes/containers by user
hotkeys("u", () => {
this.store.args.sort_processes_key = "username";
});
// u => Sort processes/containers by user
hotkeys('u', () => {
this.store.args.sort_processes_key = 'username';
});
// p => Sort processes/containers by name
hotkeys("p", () => {
this.store.args.sort_processes_key = "name";
});
// p => Sort processes/containers by name
hotkeys('p', () => {
this.store.args.sort_processes_key = 'name';
});
// i => Sort processes/containers by I/O rate
hotkeys("i", () => {
this.store.args.sort_processes_key = "io_counters";
});
// i => Sort processes/containers by I/O rate
hotkeys('i', () => {
this.store.args.sort_processes_key = 'io_counters';
});
// t => Sort processes/containers by time
hotkeys("t", () => {
this.store.args.sort_processes_key = "timemillis";
});
// t => Sort processes/containers by time
hotkeys('t', () => {
this.store.args.sort_processes_key = 'timemillis';
});
// A => Enable/disable AMPs
hotkeys("shift+A", () => {
this.store.args.disable_amps = !this.store.args.disable_amps;
});
// A => Enable/disable AMPs
hotkeys('shift+A', () => {
this.store.args.disable_amps = !this.store.args.disable_amps;
});
// d => Show/hide disk I/O stats
hotkeys("d", () => {
this.store.args.disable_diskio = !this.store.args.disable_diskio;
});
// d => Show/hide disk I/O stats
hotkeys('d', () => {
this.store.args.disable_diskio = !this.store.args.disable_diskio;
});
// Q => Show/hide IRQ
hotkeys("shift+Q", () => {
this.store.args.enable_irq = !this.store.args.enable_irq;
});
// Q => Show/hide IRQ
hotkeys('shift+Q', () => {
this.store.args.enable_irq = !this.store.args.enable_irq;
});
// f => Show/hide filesystem stats
hotkeys("f", () => {
this.store.args.disable_fs = !this.store.args.disable_fs;
});
// f => Show/hide filesystem stats
hotkeys('f', () => {
this.store.args.disable_fs = !this.store.args.disable_fs;
});
// j => Accumulate processes by program
hotkeys("j", () => {
this.store.args.programs = !this.store.args.programs;
});
// j => Accumulate processes by program
hotkeys('j', () => {
this.store.args.programs = !this.store.args.programs;
});
// k => Show/hide connections stats
hotkeys("k", () => {
this.store.args.disable_connections =
!this.store.args.disable_connections;
});
// k => Show/hide connections stats
hotkeys('k', () => {
this.store.args.disable_connections = !this.store.args.disable_connections;
});
// n => Show/hide network stats
hotkeys("n", () => {
this.store.args.disable_network = !this.store.args.disable_network;
});
// n => Show/hide network stats
hotkeys('n', () => {
this.store.args.disable_network = !this.store.args.disable_network;
});
// s => Show/hide sensors stats
hotkeys("s", () => {
this.store.args.disable_sensors = !this.store.args.disable_sensors;
});
// s => Show/hide sensors stats
hotkeys('s', () => {
this.store.args.disable_sensors = !this.store.args.disable_sensors;
});
// 2 => Show/hide left sidebar
hotkeys("2", () => {
this.store.args.disable_left_sidebar =
!this.store.args.disable_left_sidebar;
});
// 2 => Show/hide left sidebar
hotkeys('2', () => {
this.store.args.disable_left_sidebar = !this.store.args.disable_left_sidebar;
});
// z => Enable/disable processes stats
hotkeys("z", () => {
this.store.args.disable_process = !this.store.args.disable_process;
});
// z => Enable/disable processes stats
hotkeys('z', () => {
this.store.args.disable_process = !this.store.args.disable_process;
});
// S => Enable/disable short processes name
hotkeys("shift+S", () => {
this.store.args.process_short_name =
!this.store.args.process_short_name;
});
// S => Enable/disable short processes name
hotkeys('shift+S', () => {
this.store.args.process_short_name = !this.store.args.process_short_name;
});
// D => Enable/disable containers stats
hotkeys("shift+D", () => {
this.store.args.disable_containers =
!this.store.args.disable_containers;
});
// D => Enable/disable containers stats
hotkeys('shift+D', () => {
this.store.args.disable_containers = !this.store.args.disable_containers;
});
// b => Bytes or bits for network I/O
hotkeys("b", () => {
this.store.args.byte = !this.store.args.byte;
});
// b => Bytes or bits for network I/O
hotkeys('b', () => {
this.store.args.byte = !this.store.args.byte;
});
// 'B' => Switch between bit/s and IO/s for Disk IO
hotkeys("shift+B", () => {
this.store.args.diskio_iops = !this.store.args.diskio_iops;
if (this.store.args.diskio_iops) {
this.store.args.diskio_latency = false;
}
});
// 'B' => Switch between bit/s and IO/s for Disk IO
hotkeys('shift+B', () => {
this.store.args.diskio_iops = !this.store.args.diskio_iops;
});
// 'L' => Switch to latency for Disk IO
hotkeys("shift+L", () => {
this.store.args.diskio_latency = !this.store.args.diskio_latency;
if (this.store.args.diskio_latency) {
this.store.args.diskio_iops = false;
}
});
// l => Show/hide alert logs
hotkeys('l', () => {
this.store.args.disable_alert = !this.store.args.disable_alert;
});
// l => Show/hide alert logs
hotkeys("l", () => {
this.store.args.disable_alert = !this.store.args.disable_alert;
});
// 1 => Global CPU or per-CPU stats
hotkeys('1', () => {
this.store.args.percpu = !this.store.args.percpu;
});
// 1 => Global CPU or per-CPU stats
hotkeys("1", () => {
this.store.args.percpu = !this.store.args.percpu;
});
// h => Show/hide this help screen
hotkeys('h', () => {
this.store.args.help_tag = !this.store.args.help_tag;
});
// h => Show/hide this help screen
hotkeys("h", () => {
this.store.args.help_tag = !this.store.args.help_tag;
});
// T => View network I/O as combination
hotkeys('shift+T', () => {
this.store.args.network_sum = !this.store.args.network_sum;
});
// T => View network I/O as combination
hotkeys("shift+T", () => {
this.store.args.network_sum = !this.store.args.network_sum;
});
// U => View cumulative network I/O
hotkeys('shift+U', () => {
this.store.args.network_cumul = !this.store.args.network_cumul;
});
// U => View cumulative network I/O
hotkeys("shift+U", () => {
this.store.args.network_cumul = !this.store.args.network_cumul;
});
// F => Show filesystem free space
hotkeys('shift+F', () => {
this.store.args.fs_free_space = !this.store.args.fs_free_space;
});
// F => Show filesystem free space
hotkeys("shift+F", () => {
this.store.args.fs_free_space = !this.store.args.fs_free_space;
});
// 3 => Enable/disable quick look plugin
hotkeys('3', () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
});
// 3 => Enable/disable quick look plugin
hotkeys("3", () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
});
// 6 => Enable/disable mean gpu
hotkeys('6', () => {
this.store.args.meangpu = !this.store.args.meangpu;
});
// 6 => Enable/disable mean gpu
hotkeys("6", () => {
this.store.args.meangpu = !this.store.args.meangpu;
});
// G => Enable/disable gpu
hotkeys('shift+G', () => {
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// G => Enable/disable gpu
hotkeys("shift+G", () => {
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
hotkeys('5', () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
this.store.args.disable_cpu = !this.store.args.disable_cpu;
this.store.args.disable_mem = !this.store.args.disable_mem;
this.store.args.disable_memswap = !this.store.args.disable_memswap;
this.store.args.disable_load = !this.store.args.disable_load;
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
hotkeys("5", () => {
this.store.args.disable_quicklook = !this.store.args.disable_quicklook;
this.store.args.disable_cpu = !this.store.args.disable_cpu;
this.store.args.disable_mem = !this.store.args.disable_mem;
this.store.args.disable_memswap = !this.store.args.disable_memswap;
this.store.args.disable_load = !this.store.args.disable_load;
this.store.args.disable_gpu = !this.store.args.disable_gpu;
});
// I => Show/hide IP module
hotkeys('shift+I', () => {
this.store.args.disable_ip = !this.store.args.disable_ip;
});
// I => Show/hide IP module
hotkeys("shift+I", () => {
this.store.args.disable_ip = !this.store.args.disable_ip;
});
// P => Enable/disable ports module
hotkeys('shift+P', () => {
this.store.args.disable_ports = !this.store.args.disable_ports;
});
// P => Enable/disable ports module
hotkeys("shift+P", () => {
this.store.args.disable_ports = !this.store.args.disable_ports;
});
// V => Enable/disable VMs stats
hotkeys('shift+V', () => {
this.store.args.disable_vms = !this.store.args.disable_vms;
});
// V => Enable/disable VMs stats
hotkeys("shift+V", () => {
this.store.args.disable_vms = !this.store.args.disable_vms;
});
// 'W' > Enable/Disable Wifi plugin
hotkeys('shift+W', () => {
this.store.args.disable_wifi = !this.store.args.disable_wifi;
});
// 'W' > Enable/Disable Wifi plugin
hotkeys("shift+W", () => {
this.store.args.disable_wifi = !this.store.args.disable_wifi;
});
// 0 => Enable/disable IRIX mode (see issue #3158)
hotkeys("0", () => {
this.store.args.disable_irix = !this.store.args.disable_irix;
});
},
},
// 0 => Enable/disable IRIX mode (see issue #3158)
hotkeys('0', () => {
this.store.args.disable_irix = !this.store.args.disable_irix;
});
}
}
};
</script>

View File

@ -56,59 +56,54 @@
// import { store } from './store.js';
export default {
data() {
return {
servers: undefined,
};
},
computed: {
serversListLoaded() {
return this.servers !== undefined;
},
},
created() {
this.updateServersList();
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES["refresh-time"])
? parseInt(GLANCES["refresh-time"], 10)
: undefined;
this.interval = setInterval(this.updateServersList, refreshTime * 1000);
},
unmounted() {
clearInterval(this.interval);
},
methods: {
updateServersList() {
fetch("api/4/serverslist", { method: "GET" })
.then((response) => response.json())
.then((response) => (this.servers = response));
},
formatNumber(value) {
if (typeof value === "number" && !isNaN(value)) {
return value.toFixed(1);
}
return value;
},
goToGlances(server) {
if (server.protocol === "rpc") {
alert(
"You just click on a Glances RPC server.\nPlease open a terminal and enter the following command line:\n\nglances -c " +
String(server.ip) +
" -p " +
String(server.port),
);
} else {
window.location.href = server.uri;
}
},
getDecoration(server, column) {
if (server[column + "_decoration"] === undefined) {
return;
}
return server[column + "_decoration"].replace("_LOG", "").toLowerCase();
},
},
data() {
return {
servers: undefined,
};
},
computed: {
serversListLoaded() {
return this.servers !== undefined;
},
},
created() {
this.updateServersList();
},
mounted() {
const GLANCES = window.__GLANCES__ || {};
const refreshTime = isFinite(GLANCES['refresh-time'])
? parseInt(GLANCES['refresh-time'], 10)
: undefined;
this.interval = setInterval(this.updateServersList, refreshTime * 1000)
},
unmounted() {
clearInterval(this.interval)
},
methods: {
updateServersList() {
fetch('api/4/serverslist', { method: 'GET' })
.then((response) => response.json())
.then((response) => (this.servers = response));
},
formatNumber(value) {
if (typeof value === "number" && !isNaN(value)) {
return value.toFixed(1);
}
return value;
},
goToGlances(server) {
if (server.protocol === 'rpc') {
alert("You just click on a Glances RPC server.\nPlease open a terminal and enter the following command line:\n\nglances -c " + String(server.ip) + " -p " + String(server.port))
} else {
window.location.href = server.uri;
}
},
getDecoration(server, column) {
if (server[column + '_decoration'] === undefined) {
return;
}
return server[column + '_decoration'].replace('_LOG', '').toLowerCase();
}
}
};
</script>

View File

@ -1,17 +1,17 @@
/* global module */
if (module.hot) {
module.hot.accept();
module.hot.accept();
}
import "../css/custom.scss";
import "../css/style.scss";
import '../css/custom.scss';
import '../css/style.scss';
import * as bootstrap from "bootstrap";
import * as bootstrap from 'bootstrap';
import { createApp } from "vue";
import App from "./App.vue";
import { createApp } from 'vue';
import App from './App.vue';
import * as filters from "./filters.js";
const app = createApp(App);
app.config.globalProperties.$filters = filters;
app.mount("#app");
app.mount('#app');

Some files were not shown because too many files have changed in this diff Show More