Merge branch 'develop'

This commit is contained in:
nicolargo 2025-11-02 17:02:14 +01:00
commit f38ff27faa
140 changed files with 6195 additions and 1182 deletions

View File

@ -8,10 +8,8 @@
!/glances/outputs/static
# Include Requirements files
!/requirements.txt
!/all-requirements.txt
!/docker-requirements.txt
!/webui-requirements.txt
!/optional-requirements.txt
# Include Config file
!/docker-compose/glances.conf
@ -19,3 +17,6 @@
# Include Binary file
!/docker-bin.sh
# Include TOML file
!/pyproject.toml

View File

@ -12,9 +12,9 @@ jobs:
if: github.event_name == 'push'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: "3.13"
- name: Install pypa/build
@ -45,7 +45,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
@ -54,6 +54,7 @@ jobs:
with:
skip-existing: true
attestations: false
print-hash: true
pypi_test:
name: Publish Python 🐍 distribution 📦 to TestPyPI
@ -69,7 +70,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/

View File

@ -63,7 +63,7 @@ jobs:
tag: ${{ fromJson(needs.create_docker_images_list.outputs.tags) }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Retrieve Repository Docker metadata
id: docker_meta

View File

@ -11,7 +11,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Run Trivy vulnerability scanner in repo mode
uses: aquasecurity/trivy-action@master

View File

@ -10,7 +10,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
- uses: actions/stale@v10
with:
days-before-issue-stale: 90
days-before-issue-close: -1

View File

@ -22,7 +22,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL

View File

@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Check formatting with Ruff
uses: chartboost/ruff-action@v1
@ -37,14 +37,14 @@ jobs:
runs-on: ubuntu-24.04
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -70,14 +70,14 @@ jobs:
runs-on: windows-2025
strategy:
matrix:
# Windows-curses not available for Python 3.13 for the moment
python-version: ["3.9", "3.10", "3.11", "3.12"]
# Windows-curses not available for Python 3.14 for the moment
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'
@ -96,7 +96,7 @@ jobs:
needs: source-code-checks
# https://github.com/actions/runner-images?tab=readme-ov-file#available-images
runs-on: macos-14
runs-on: macos-15
strategy:
matrix:
# Only test the latest stable version
@ -104,10 +104,10 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: 'pip'

View File

@ -14,9 +14,9 @@ jobs:
# See supported Node.js release schedule at https://nodejs.org/en/about/releases/
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Glances will be build with Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: ${{ matrix.node-version }}
cache: 'npm'

4
.gitignore vendored
View File

@ -63,7 +63,9 @@ bower_components/
/*_source.tar.bz2
# Virtual env
/venv*/
.venv/
uv.lock
.python-version
# Test
.coverage

View File

@ -1,6 +1,6 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
rev: v6.0.0
hooks:
- id: check-ast
- id: check-docstring-first
@ -15,8 +15,23 @@ repos:
- id: requirements-txt-fixer
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.11.5
rev: v0.14.1
hooks:
- id: ruff-format
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
# Run the linter.
- id: ruff-check
types_or: [ python, pyi ]
args: [ --fix, --exit-non-zero-on-fix ]
# Run the formatter.
- id: ruff-format
types_or: [ python, pyi ]
- repo: local
hooks:
# test duplicate line at the end of file with a custom script
# /bin/bash tests-data/tools/find-duplicate-lines.sh
- id: find-duplicate-lines
name: find duplicate lines at the end of file
entry: bash tests-data/tools/find-duplicate-lines.sh
language: system
types: [python]
pass_filenames: false

View File

@ -31,4 +31,4 @@ sphinx:
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: doc-requirements.txt
- requirements: dev-requirements.txt

View File

@ -6,6 +6,7 @@ include README.rst
include SECURITY.md
include conf/glances.conf
include requirements.txt
include all-requirements.txt
recursive-include docs *
recursive-include glances *.py
recursive-include glances/outputs/static *

212
Makefile
View File

@ -1,17 +1,6 @@
PORT ?= 8008
venv_full:= venv/bin
venv_min := venv-min/bin
CONF := conf/glances.conf
PIP := $(venv_full)/pip
PYTHON := $(venv_full)/python
PYTEST := $(venv_full)/python -m pytest
LASTTAG = $(shell git describe --tags --abbrev=0)
VENV_TYPES := full min
VENV_PYTHON := $(VENV_TYPES:%=venv-%-python)
VENV_UPG := $(VENV_TYPES:%=venv-%-upgrade)
VENV_DEPS := $(VENV_TYPES:%=venv-%)
VENV_INST_UPG := $(VENV_DEPS) $(VENV_UPG)
PORT ?= 8008
CONF := conf/glances.conf
LASTTAG = $(shell git describe --tags --abbrev=0)
IMAGES_TYPES := full minimal
DISTROS := alpine ubuntu
@ -27,10 +16,15 @@ DOCKER_SOCK ?= /var/run/docker.sock
DOCKER_SOCKS := -v $(PODMAN_SOCK):$(PODMAN_SOCK):ro -v $(DOCKER_SOCK):$(DOCKER_SOCK):ro
DOCKER_OPTS := --rm -e TZ="${TZ}" -e GLANCES_OPT="" --pid host --network host
# User-friendly check for uv
ifeq ($(shell which uv >/dev/null 2>&1; echo $$?), 1)
$(error The 'uv' command was not found. Make sure you have Astral Uv installed, then set the UV environment variable to point to the full path of the 'uv' executable. Alternatively more information with make install-uv)
endif
# if the command is only `make`, the default tasks will be the printing of the help.
.DEFAULT_GOAL := help
.PHONY: help test docs docs-server venv venv-min
.PHONY: help test docs docs-server venv
help: ## List all make commands available
@grep -E '^[\.a-zA-Z_%-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@ -44,76 +38,88 @@ help: ## List all make commands available
# Virtualenv
# ===================================================================
venv-%-upgrade: UPGRADE = --upgrade
install-uv: ## Instructions to install the UV tool
@echo "Install the UV tool (https://astral.sh/uv/)"
@echo "Please install the UV tool manually"
@echo "For example with: curl -LsSf https://astral.sh/uv/install.sh | sh"
@echo "Or via a package manager of your distribution"
@echo "For example for Snap: snap install astral-uv"
define DEFINE_VARS_FOR_TYPE
venv-$(TYPE) venv-$(TYPE)-upgrade: VIRTUAL_ENV = $(venv_$(TYPE))
endef
upgrade-uv: ## Upgrade the UV tool
uv self update
$(foreach TYPE,$(VENV_TYPES),$(eval $(DEFINE_VARS_FOR_TYPE)))
venv: ## Create the virtualenv with all dependencies
uv sync --all-extras --no-group dev
$(VENV_PYTHON): venv-%-python:
virtualenv -p python3 $(if $(filter full,$*),venv,venv-$*)
venv-upgrade venv-switch-to-full: ## Upgrade the virtualenv with all dependencies
uv sync --upgrade --all-extras
$(VENV_INST_UPG): venv-%:
$(if $(UPGRADE),$(VIRTUAL_ENV)/pip install --upgrade pip,)
$(foreach REQ,$(REQS), $(VIRTUAL_ENV)/pip install $(UPGRADE) -r $(REQ);)
$(if $(PRE_COMMIT),$(VIRTUAL_ENV)/pre-commit install --hook-type pre-commit,)
venv-min: ## Create the virtualenv with minimal dependencies
uv sync
venv-python: $(VENV_PYTHON) ## Install all Python 3 venv
venv: $(VENV_DEPS) ## Install all Python 3 dependencies
venv-upgrade: $(VENV_UPG) ## Upgrade all Python 3 dependencies
venv-upgrade-min venv-switch-to-min: ## Upgrade the virtualenv with minimal dependencies
uv sync --upgrade
# For full installation (with optional dependencies)
venv-clean: ## Remove the virtualenv
rm -rf .venv
venv-full venv-full-upgrade: REQS = requirements.txt optional-requirements.txt dev-requirements.txt doc-requirements.txt
venv-dev: ## Create the virtualenv with dev dependencies
uv sync --dev --all-extras
uv run pre-commit install --hook-type pre-commit
venv-full-python: ## Install Python 3 venv
venv-full: venv-python ## Install Python 3 run-time
venv-full-upgrade: ## Upgrade Python 3 run-time dependencies
venv-full: PRE_COMMIT = 1
# ===================================================================
# Requirements
#
# Note: the --no-hashes option should be used because pip (in CI) has
# issues with hashes.
# ===================================================================
# For minimal installation (without optional dependencies)
requirements-min: ## Generate the requirements.txt files (minimal dependencies)
uv export --no-emit-workspace --no-hashes --no-group dev --output-file requirements.txt
venv-min venv-min-upgrade: REQS = requirements.txt dev-requirements.txt doc-requirements.txt
requirements-all: ## Generate the all-requirements.txt files (all dependencies)
uv export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
venv-min-python: ## Install Python 3 venv minimal
venv-min: venv-min-python ## Install Python 3 minimal run-time dependencies
venv-min-upgrade: ## Upgrade Python 3 minimal run-time dependencies
requirements-docker: ## Generate the docker-requirements.txt files (Docker specific dependencies)
uv export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
requirements-dev: ## Generate the dev-requirements.txt files (dev dependencies)
uv export --no-hashes --only-dev --output-file dev-requirements.txt
requirements: requirements-min requirements-all requirements-dev requirements-docker ## Generate all the requirements files
requirements-upgrade: venv-upgrade requirements ## Upgrade the virtualenv and regenerate all the requirements files
# ===================================================================
# Tests
# ===================================================================
test: ## Run All unit tests
$(PYTEST)
uv run pytest
test-core: ## Run Core unit tests
$(PYTEST) tests/test_core.py
uv run pytest tests/test_core.py
test-api: ## Run API unit tests
uv run pytest tests/test_api.py
test-memoryleak: ## Run Memory-leak unit tests
$(PYTEST) tests/test_memoryleak.py
uv run pytest tests/test_memoryleak.py
test-perf: ## Run Perf unit tests
$(PYTEST) tests/test_perf.py
uv run pytest tests/test_perf.py
test-restful: ## Run Restful API unit tests
$(PYTEST) tests/test_restful.py
uv run pytest tests/test_restful.py
test-webui: ## Run WebUI unit tests
$(PYTEST) tests/test_webui.py
uv run pytest tests/test_webui.py
test-xmlrpc: ## Run XMLRPC API unit tests
$(PYTEST) tests/test_xmlrpc.py
uv run pytest tests/test_xmlrpc.py
test-with-upgrade: venv-upgrade test ## Upgrade deps and run unit tests
test-min: ## Run core unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-min-with-upgrade: venv-min-upgrade ## Upgrade deps and run unit tests in minimal environment
$(venv_min)/python -m pytest tests/test_core.py
test-export-csv: ## Run interface tests with CSV
/bin/bash ./tests/test_export_csv.sh
@ -129,26 +135,29 @@ test-export-influxdb-v3: ## Run interface tests with InfluxDB version 3 (Core)
test-export-timescaledb: ## Run interface tests with TimescaleDB
/bin/bash ./tests/test_export_timescaledb.sh
test-export: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb## Tests all exports
test-exports: test-export-csv test-export-json test-export-influxdb-v1 test-export-influxdb-v3 test-export-timescaledb ## Tests all exports
# ===================================================================
# Linters, profilers and cyber security
# ===================================================================
find-duplicate-lines:
/bin/bash tests-data/tools/find-duplicate-lines.sh
format: ## Format the code
$(venv_full)/python -m ruff format .
uv run ruff format .
lint: ## Lint the code.
$(venv_full)/python -m ruff check . --fix
uv run ruff check . --fix
lint-readme: ## Lint the main README.rst file
$(venv_full)/python -m rstcheck README.rst
uv run rstcheck README.rst
codespell: ## Run codespell to fix common misspellings in text files
$(venv_full)/codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
uv run codespell -S .git,./docs/_build,./Glances.egg-info,./venv*,./glances/outputs,*.svg -L hart,bu,te,statics -w
semgrep: ## Run semgrep to find bugs and enforce code standards
$(venv_full)/semgrep scan --config=auto
uv run semgrep scan --config=auto
profiling-%: SLEEP = 3
profiling-%: TIMES = 30
@ -162,27 +171,27 @@ endef
profiling-gprof: CPROF = glances.cprof
profiling-gprof: ## Callgraph profiling (need "apt install graphviz")
$(DISPLAY-BANNER)
$(PYTHON) -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
$(venv_full)/gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
uv run python -m cProfile -o $(CPROF) run-venv.py -C $(CONF) --stop-after $(TIMES)
uv run gprof2dot -f pstats $(CPROF) | dot -Tsvg -o $(OUT_DIR)/glances-cgraph.svg
rm -f $(CPROF)
profiling-pyinstrument: ## PyInstrument profiling
$(DISPLAY-BANNER)
$(PIP) install pyinstrument
$(PYTHON) -m pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
uv add pyinstrument
uv run pyinstrument -r html -o $(OUT_DIR)/glances-pyinstrument.html -m glances -C $(CONF) --stop-after $(TIMES)
profiling-pyspy: ## Flame profiling
$(DISPLAY-BANNER)
$(venv_full)/py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- $(PYTHON) run-venv.py -C $(CONF) --stop-after $(TIMES)
uv run py-spy record -o $(OUT_DIR)/glances-flame.svg -d 60 -s -- uv run python run-venv.py -C $(CONF) --stop-after $(TIMES)
profiling: profiling-gprof profiling-pyinstrument profiling-pyspy ## Profiling of the Glances software
trace-malloc: ## Trace the malloc() calls
@echo "Malloc test is running, please wait ~30 secondes..."
$(PYTHON) -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
uv run python -m glances -C $(CONF) --trace-malloc --stop-after 15 --quiet
memory-leak: ## Profile memory leaks
$(PYTHON) -m glances -C $(CONF) --memory-leak
uv run python -m glances -C $(CONF) --memory-leak
memory-profiling: TIMES = 2400
memory-profiling: PROFILE = mprofile_*.dat
@ -191,30 +200,31 @@ memory-profiling: ## Profile memory usage
@echo "It's a very long test (~4 hours)..."
rm -f $(PROFILE)
@echo "1/2 - Start memory profiling with the history option enable"
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
uv run mprof run -T 1 -C run-venv.py -C $(CONF) --stop-after $(TIMES) --quiet
uv run mprof plot --output $(OUT_DIR)/glances-memory-profiling-with-history.png
rm -f $(PROFILE)
@echo "2/2 - Start memory profiling with the history option disable"
$(venv_full)/mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
$(venv_full)/mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
uv run mprof run -T 1 -C run-venv.py -C $(CONF) --disable-history --stop-after $(TIMES) --quiet
uv run mprof plot --output $(OUT_DIR)/glances-memory-profiling-without-history.png
rm -f $(PROFILE)
# Trivy installation: https://aquasecurity.github.io/trivy/latest/getting-started/installation/
trivy: ## Run Trivy to find vulnerabilities in container images
trivy fs .
uv run trivy fs .
# ===================================================================
# Docs
# ===================================================================
docs: ## Create the documentation
$(PYTHON) ./generate_openapi.py
$(PYTHON) -m glances -C $(CONF) --api-doc > ./docs/api.rst
uv run python -m glances -C $(CONF) --api-doc > ./docs/api/python.rst
uv run python ./generate_openapi.py
uv run python -m glances -C $(CONF) --api-restful-doc > ./docs/api/restful.rst
cd docs && ./build.sh && cd ..
docs-server: docs ## Start a Web server to serve the documentation
(sleep 2 && sensible-browser "http://localhost:$(PORT)") &
cd docs/_build/html/ && ../../../venv/bin/python -m http.server $(PORT)
cd docs/_build/html/ && uv run python -m http.server $(PORT)
release-note: ## Generate release note
git --no-pager log $(LASTTAG)..HEAD --first-parent --pretty=format:"* %s"
@ -231,17 +241,19 @@ install: ## Open a Web Browser to the installation procedure
webui webui%: DIR = glances/outputs/static/
webui: ## Build the Web UI
$(PYTHON) -c 'import json; from glances.outputs.glances_curses import _GlancesCurses; print(json.dumps({ "leftMenu": [p for p in _GlancesCurses._left_sidebar if p != "now"]}, indent=4))' > ./glances/outputs/static/js/uiconfig.json
webui-gen-config: ## Generate the Web UI config file
uv run python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
webui: webui-gen-config ## Build the Web UI
cd $(DIR) && npm ci && npm run build
webui-audit: ## Audit the Web UI
cd $(DIR) && npm audit
webui-audit-fix: ## Fix audit the Web UI
webui-audit-fix: webui-gen-config ## Fix audit the Web UI
cd $(DIR) && npm audit fix && npm ci && npm run build
webui-update: ## Update JS dependencies
webui-update: webui-gen-config ## Update JS dependencies
cd $(DIR) && npm update --save && npm ci && npm run build
# ===================================================================
@ -250,7 +262,7 @@ webui-update: ## Update JS dependencies
flatpak: venv-upgrade ## Generate FlatPack JSON file
git clone https://github.com/flatpak/flatpak-builder-tools.git
$(PYTHON) ./flatpak-builder-tools/pip/flatpak-pip-generator glances
uv run python ./flatpak-builder-tools/pip/flatpak-pip-generator glances
rm -rf ./flatpak-builder-tools
@echo "Now follow: https://github.com/flathub/flathub/wiki/App-Submission"
@ -289,28 +301,22 @@ docker-ubuntu-dev: ## Generate local docker image (Ubuntu dev)
# ===================================================================
run: ## Start Glances in console mode (also called standalone)
$(PYTHON) -m glances -C $(CONF)
uv run python -m glances -C $(CONF)
run-debug: ## Start Glances in debug console mode (also called standalone)
$(PYTHON) -m glances -C $(CONF) -d
uv run python -m glances -C $(CONF) -d
run-local-conf: ## Start Glances in console mode with the system conf file
$(PYTHON) -m glances
uv run python -m glances
run-local-conf-hide-public: ## Start Glances in console mode with the system conf file and hide public information
$(PYTHON) -m glances --hide-public-info
run-min: ## Start minimal Glances in console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF)
run-min-debug: ## Start minimal Glances in debug console mode (also called standalone)
$(venv_min)/python -m glances -C $(CONF) -d
run-min-local-conf: ## Start minimal Glances in console mode with the system conf file
$(venv_min)/python -m glances
uv run python -m glances --hide-public-info
run-like-htop: ## Start Glances with the same features than Htop
$(venv_min)/python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
uv run python -m glances --disable-plugin network,ports,wifi,connections,diskio,fs,irq,folders,raid,smart,sensors,vms,containers,ip,amps --disable-left-sidebar
run-fetch: ## Start Glances in fetch mode
uv run python -m glances --fetch
$(DOCKER_RUNTIMES): run-docker-%:
$(DOCKER_RUN) $(DOCKER_OPTS) $(DOCKER_SOCKS) -it glances:local-$*
@ -323,31 +329,31 @@ run-docker-ubuntu-full: ## Start Glances Ubuntu Docker full in console mode
run-docker-ubuntu-dev: ## Start Glances Ubuntu Docker dev in console mode
run-webserver: ## Start Glances in Web server mode
$(PYTHON) -m glances -C $(CONF) -w
uv run python -m glances -C $(CONF) -w
run-webserver-local-conf: ## Start Glances in Web server mode with the system conf file
$(PYTHON) -m glances -w
uv run python -m glances -w
run-webserver-local-conf-hide-public: ## Start Glances in Web server mode with the system conf file and hide public info
$(PYTHON) -m glances -w --hide-public-info
uv run python -m glances -w --hide-public-info
run-restapiserver: ## Start Glances in REST API server mode
$(PYTHON) -m glances -C $(CONF) -w --disable-webui
uv run python -m glances -C $(CONF) -w --disable-webui
run-server: ## Start Glances in server mode (RPC)
$(PYTHON) -m glances -C $(CONF) -s
uv run python -m glances -C $(CONF) -s
run-client: ## Start Glances in client mode (RPC)
$(PYTHON) -m glances -C $(CONF) -c localhost
uv run python -m glances -C $(CONF) -c localhost
run-browser: ## Start Glances in browser mode (RPC)
$(PYTHON) -m glances -C $(CONF) --browser
uv run python -m glances -C $(CONF) --browser
run-web-browser: ## Start Web Central Browser
$(PYTHON) -m glances -C $(CONF) -w --browser
uv run python -m glances -C $(CONF) -w --browser
run-issue: ## Start Glances in issue mode
$(PYTHON) -m glances -C $(CONF) --issue
uv run python -m glances -C $(CONF) --issue
run-multipass: ## Install and start Glances in a VM (only available on Ubuntu with multipass already installed)
multipass launch -n glances-on-lts lts
@ -357,4 +363,4 @@ run-multipass: ## Install and start Glances in a VM (only available on Ubuntu wi
multipass delete glances-on-lts
show-version: ## Show Glances version number
$(PYTHON) -m glances -C $(CONF) -V
uv run python -m glances -C $(CONF) -V

View File

@ -2,6 +2,87 @@
Glances ChangeLog
==============================================================================
=============
Version 4.4.0
=============
Breaking changes:
* A new Python API is now available to use Glances as a Python lib in your own development #3237
* In the process list, the long command line is now truncated by default. Use the arrow keys to show the full command line. SHIFT + arrow keys are used to switch between column sorts (TUI).
* Prometheus export format is now more user friendly (see detail in #3283)
Enhancements:
* Make a Glances API in order to use Glances as a Python lib #3237
* Add a new --fetch (neofetch like) option to display a snapshot of the current system status #3281
* Show used port in container section #2054
* Show long command line with arrow key #1553
* Sensors plugin refresh by default every 10 seconds
* Do not call update if a call is done to a specific plugin through the API #3033
* [UI] Process virtual memory display can be disabled by configuration #3299
* Choose between used or available in the mem plugin #3288
* [Experimental] Add export to DuckDB database #3205
* Add Disk I/O Latency stats #1070
* Filter fields to export #3258
* Remove .keys() from loops over dicts #3253
* Remove iterator helpers #3252
Bug corrected:
* [MACOS] Glances not showing Processes on MacOS #3100
* Last dev build broke Homepage API calls ? only 1 widget still working #3322
* Cloud plugin always generate communication with 169.254.169.254, even if the plugin is disabled #3316
* API response delay (3+ minutes) when VMs are running #3317
* [WINDOWS] Glances do not display CPU stat correctly #3155
* Glances hangs if network device (NFS) is no available #3290
* Fix prometheus export format #3283
* Issue #3279 zfs cache and memory math issues #3289
* [MACOS] Glances crashes when I try to filter #3266
* Glances hang when killing process with muliple CTRL-C #3264
* Issues after disabling system and processcount plugins #3248
* Headers missing from predefined fields in TUI browser machine list #3250
* Add another check for the famous Netifaces issue - Related to #3219
* Key error 'type' in server_list_static.py (load_server_list) #3247
Continuous integration and documentation:
* Glances now use uv for the dev environment #3025
* Glances is compatible with Python 3.14 #3319
* Glances provides requirements files with specific versions for each release
* Requirements files are now generated dynamically with the make requirements or requirements-upgrade target
* Add duplicate line check in pre-commit (strange behavior with some VSCode extensions)
* Solve issue with multiprocessing exception with Snap package
* Add a test script to identify CPU consumption of the sensors plugin
* Refactor port to take into account netifaces2
* Correct issue with Chrome driver in WebUI unit test
* Upgrade export test with InfluxDB 1.12
* Fix typo of --export-process-filter help message #3314
* In the outdated feature, catch error message if Pypi server not reachable
* Add unit test for auto_unit
* Label error in docs #3286
* Put WebUI conf generator in a dedicated script
* Refactor the Makefile to generate WebUI config file for all webui targets
* Update sensors documentation #3275
* Update docker compose env quote #3273
* Update docker-compose.yml #3249
* Update API doc generation
* Update README with nice icons #3236
* Add documentation for WebUI test
Thanks to all contributors and bug reporters!
Special thanks to:
- Adi
- Bennett Kanuka
- Tim Potter
- Ariel Otilibili
- Boris Okassa
- Lawrence
- Shohei YOSHIDA
- jmwallach
- korn3r
=============
Version 4.3.3
=============
@ -424,7 +505,7 @@ See release note in Wiki format: https://github.com/nicolargo/glances/wiki/Glanc
**BREAKING CHANGES:**
* The minimal Python version is 3.8
* The Glances API version 3 is replaced by the version 4. So Restfull API URL is now /api/4/ #2610
* The Glances API version 3 is replaced by the version 4. So Restful API URL is now /api/4/ #2610
* Alias definition change in the configuration file #1735
Glances version 3.x and lower:
@ -449,9 +530,9 @@ Minimal requirements for Glances version 4 are:
* packaging
* ujson
* pydantic
* fastapi (for WebUI / RestFull API)
* uvicorn (for WebUI / RestFull API)
* jinja2 (for WebUI / RestFull API)
* fastapi (for WebUI / RestFul API)
* uvicorn (for WebUI / RestFul API)
* jinja2 (for WebUI / RestFul API)
Majors changes between Glances version 3 and version 4:
@ -511,7 +592,7 @@ Bug corrected:
CI and documentation:
* New logo for Glances version 4.0 #2713
* Update api.rst documentation #2496
* Update api-restful.rst documentation #2496
* Change Renovate config #2729
* Docker compose password unrecognized arguments when applying docs #2698
* Docker includes OS Release Volume mount info #2473
@ -889,7 +970,7 @@ Bugs corrected:
* Threading.Event.isSet is deprecated in Python 3.10 #2017
* Fix code scanning alert - Clear-text logging of sensitive information security #2006
* The gpu temperature unit are displayed incorrectly in web ui bug #2002
* Doc for 'alert' Restfull/JSON API response documentation #1994
* Doc for 'alert' Restful/JSON API response documentation #1994
* Show the spinning state of a disk documentation #1993
* Web server status check endpoint enhancement #1988
* --time parameter being ignored for client/server mode bug #1978
@ -984,7 +1065,7 @@ Bugs corrected:
* [3.2.0/3.2.1] keybinding not working anymore #1904
* InfluxDB/InfluxDB2 Export object has no attribute hostname #1899
Documentation: The "make docs" generate RestFull/API documentation file.
Documentation: The "make docs" generate RestFul/API documentation file.
===============
Version 3.2.1
@ -2011,7 +2092,7 @@ Version 2.1
* Add Glances log message (in the /tmp/glances.log file)
The default log level is INFO, you can switch to the DEBUG mode using the -d option on the command line.
* Add RESTful API to the Web server mode
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
RESTful API doc: https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
* Improve SNMP fallback mode for Cisco IOS, VMware ESXi
* Add --theme-white feature to optimize display for white background
* Experimental history feature (--enable-history option on the command line)

View File

@ -3,7 +3,7 @@ Glances - An Eye on your System
===============================
| |pypi| |test| |contributors| |quality|
| |starts| |docker| |pypistat| |ossrank|
| |starts| |docker| |pypistat|
| |sponsors| |twitter|
.. |pypi| image:: https://img.shields.io/pypi/v/glances.svg
@ -21,10 +21,6 @@ Glances - An Eye on your System
:target: https://pepy.tech/project/glances
:alt: Pypi downloads
.. |ossrank| image:: https://shields.io/endpoint?url=https://ossrank.com/shield/3689
:target: https://ossrank.com/p/3689
:alt: OSSRank
.. |test| image:: https://github.com/nicolargo/glances/actions/workflows/ci.yml/badge.svg?branch=develop
:target: https://github.com/nicolargo/glances/actions
:alt: Linux tests (GitHub Actions)
@ -45,8 +41,8 @@ Glances - An Eye on your System
:target: https://twitter.com/nicolargo
:alt: @nicolargo
Summary
=======
Summary 🌟
==========
**Glances** is an open-source system cross-platform monitoring tool.
It allows real-time monitoring of various aspects of your system such as
@ -58,21 +54,19 @@ and can also be used for remote monitoring of systems via a web interface or com
line interface. It is easy to install and use and can be customized to show only
the information that you are interested in.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-summary.png
In client/server mode, remote monitoring could be done via terminal,
Web interface or API (XML-RPC and RESTful).
Stats can also be exported to files or external time/value databases, CSV or direct
output to STDOUT.
.. image:: https://raw.githubusercontent.com/nicolargo/glances/develop/docs/_static/glances-responsive-webdesign.png
.. image:: ./docs/_static/glances-responsive-webdesign.png
Glances is written in Python and uses libraries to grab information from
your system. It is based on an open architecture where developers can
add new plugins or exports modules.
Usage
=====
Usage 👋
========
For the standalone mode, just run:
@ -80,6 +74,8 @@ For the standalone mode, just run:
$ glances
.. image:: ./docs/_static/glances-summary.png
For the Web server mode, run:
.. code-block:: console
@ -88,20 +84,22 @@ For the Web server mode, run:
and enter the URL ``http://<ip>:61208`` in your favorite web browser.
For the client/server mode, run:
In this mode, an HTTP/Restful API is exposed; see the `RestfulApi`_ document for more details.
.. image:: ./docs/_static/screenshot-web.png
For the client/server mode (remote monitoring through XML-RPC), run the following command on the server:
.. code-block:: console
$ glances -s
on the server side and run:
and this one on the client:
.. code-block:: console
$ glances -c <ip>
on the client one.
You can also detect and display all Glances servers available on your
network (or defined in the configuration file) in TUI:
@ -147,17 +145,86 @@ or in a JSON format thanks to the stdout-json option (attribute not supported in
mem: {"total": 7837949952, "available": 2919079936, "percent": 62.8, "used": 4918870016, "free": 2919079936, "active": 2841214976, "inactive": 3340550144, "buffers": 546799616, "cached": 3068141568, "shared": 788156416}
...
Last but not least, you can use the fetch mode to get a quick look of a machine:
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: ./docs/_static/screenshot-fetch.png
and RTFM, always.
Documentation
=============
Use Glances as a Python library 📚
==================================
You can access the Glances API by importing the `glances.api` module and creating an
instance of the `GlancesAPI` class. This instance provides access to all Glances plugins
and their fields. For example, to access the CPU plugin and its total field, you can
use the following code:
.. code-block:: python
>>> from glances import api
>>> gl = api.GlancesAPI()
>>> gl.cpu
{'cpucore': 16,
'ctx_switches': 1214157811,
'guest': 0.0,
'idle': 91.4,
'interrupts': 991768733,
'iowait': 0.3,
'irq': 0.0,
'nice': 0.0,
'soft_interrupts': 423297898,
'steal': 0.0,
'syscalls': 0,
'system': 5.4,
'total': 7.3,
'user': 3.0}
>>> gl.cpu["total"]
7.3
>>> gl.mem["used"]
12498582144
>>> gl.auto_unit(gl.mem["used"])
11.6G
If the stats return a list of items (like network interfaces or processes), you can
access them by their name:
.. code-block:: python
>>> gl.network.keys()
['wlp0s20f3', 'veth33b370c', 'veth19c7711']
>>> gl.network["wlp0s20f3"]
{'alias': None,
'bytes_all': 362,
'bytes_all_gauge': 9242285709,
'bytes_all_rate_per_sec': 1032.0,
'bytes_recv': 210,
'bytes_recv_gauge': 7420522678,
'bytes_recv_rate_per_sec': 599.0,
'bytes_sent': 152,
'bytes_sent_gauge': 1821763031,
'bytes_sent_rate_per_sec': 433.0,
'interface_name': 'wlp0s20f3',
'key': 'interface_name',
'speed': 0,
'time_since_update': 0.3504955768585205}
For a complete example of how to use Glances as a library, have a look at the `PythonApi`_.
Documentation 📜
================
For complete documentation have a look at the readthedocs_ website.
If you have any question (after RTFM!), please post it on the official Q&A `forum`_.
If you have any questions (after RTFM!), please post them on the official Reddit `forum`_.
Gateway to other services
=========================
Gateway to other services 🌐
============================
Glances can export stats to:
@ -178,8 +245,8 @@ Glances can export stats to:
- ``Graphite`` server
- ``RESTful`` endpoint
Installation
============
Installation 🚀
===============
There are several methods to test/install Glances on your system. Choose your weapon!
@ -262,6 +329,15 @@ Install Glances (with all features):
The glances script will be installed in the ~/.local/bin folder.
Brew: The missing package manager
---------------------------------
For Linux and Mac OS, it is also possible to install Glances with `Brew`_:
.. code-block:: console
brew install glances
Docker: the cloudy way
----------------------
@ -447,8 +523,8 @@ Ansible
A Glances ``Ansible`` role is available: https://galaxy.ansible.com/zaxos/glances-ansible-role/
Shell tab completion
====================
Shell tab completion 🔍
=======================
Glances 4.3.2 and higher includes shell tab autocompletion thanks to the --print-completion option.
@ -462,8 +538,8 @@ For example, on a Linux operating system with bash shell:
Following shells are supported: bash, zsh and tcsh.
Requirements
============
Requirements 🧩
===============
Glances is developed in Python. A minimal Python version 3.9 or higher
should be installed on your system.
@ -480,8 +556,9 @@ Dependencies:
- ``packaging`` (for the version comparison)
- ``windows-curses`` (Windows Curses implementation) [Windows-only]
- ``shtab`` (Shell autocompletion) [All but Windows]
- ``jinja2`` (for fetch mode and templating)
Optional dependencies:
Extra dependencies:
- ``batinfo`` (for battery monitoring)
- ``bernhard`` (for the Riemann export module)
@ -494,7 +571,6 @@ Optional dependencies:
- ``hddtemp`` (for HDD temperature monitoring support) [Linux-only]
- ``influxdb`` (for the InfluxDB version 1 export module)
- ``influxdb-client`` (for the InfluxDB version 2 export module)
- ``jinja2`` (for templating, used under the hood by FastAPI)
- ``kafka-python`` (for the Kafka export module)
- ``netifaces2`` (for the IP plugin)
- ``nvidia-ml-py`` (for the GPU plugin)
@ -516,8 +592,8 @@ Optional dependencies:
- ``wifi`` (for the wifi plugin) [Linux-only]
- ``zeroconf`` (for the autodiscover mode)
How to contribute ?
===================
How to contribute ? 🤝
======================
If you want to contribute to the Glances project, read this `wiki`_ page.
@ -526,8 +602,8 @@ There is also a chat dedicated to the Glances developers:
.. image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/nicolargo/glances?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
Project sponsorship
===================
Project sponsorship 🙌
======================
You can help me to achieve my goals of improving this open-source project
or just say "thank you" by:
@ -538,21 +614,21 @@ or just say "thank you" by:
Any and all contributions are greatly appreciated.
Author
======
Authors and Contributors 🔥
===========================
Nicolas Hennion (@nicolargo) <nicolas@nicolargo.com>
.. image:: https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40nicolargo
:target: https://twitter.com/nicolargo
License
=======
License 📜
==========
Glances is distributed under the LGPL version 3 license. See ``COPYING`` for more details.
More stars !
============
More stars ! 🌟
===============
Please give us a star on `GitHub`_ if you like this project.
@ -561,13 +637,17 @@ Please give us a star on `GitHub`_ if you like this project.
:alt: Star history
.. _psutil: https://github.com/giampaolo/psutil
.. _Brew: https://formulae.brew.sh/formula/glances
.. _Python: https://www.python.org/getit/
.. _Termux: https://play.google.com/store/apps/details?id=com.termux
.. _readthedocs: https://glances.readthedocs.io/
.. _forum: https://groups.google.com/forum/?hl=en#!forum/glances-users
.. _forum: https://www.reddit.com/r/glances/
.. _wiki: https://github.com/nicolargo/glances/wiki/How-to-contribute-to-Glances-%3F
.. _package: https://repology.org/project/glances/versions
.. _sponsors: https://github.com/sponsors/nicolargo
.. _wishlist: https://www.amazon.fr/hz/wishlist/ls/BWAAQKWFR3FI?ref_=wl_share
.. _Docker: https://github.com/nicolargo/glances/blob/develop/docs/docker.rst
.. _GitHub: https://github.com/nicolargo/glances
.. _PythonApi: https://glances.readthedocs.io/en/develop/api/python.html
.. _RestfulApi: https://glances.readthedocs.io/en/develop/api/restful.html

271
all-requirements.txt Normal file
View File

@ -0,0 +1,271 @@
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --all-extras --no-group dev --output-file all-requirements.txt
annotated-doc==0.0.3
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.11.0
# via
# elasticsearch
# starlette
batinfo==0.4.2 ; sys_platform == 'linux'
# via glances
bernhard==0.2.6
# via glances
cassandra-driver==3.29.3
# via glances
certifi==2025.10.5
# via
# elastic-transport
# influxdb-client
# influxdb3-python
# requests
cffi==2.0.0 ; implementation_name == 'pypy' or platform_python_implementation != 'PyPy'
# via
# cryptography
# pyzmq
charset-normalizer==3.4.4
# via requests
chevron==0.14.0
# via glances
click==8.1.8
# via
# geomet
# uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via
# click
# pytest
coverage==7.10.7 ; python_full_version < '3.10'
# via pytest-cov
coverage==7.11.0 ; python_full_version >= '3.10'
# via pytest-cov
cryptography==46.0.3
# via pysnmpcrypto
defusedxml==0.7.1
# via glances
dnspython==2.7.0 ; python_full_version < '3.10'
# via pymongo
dnspython==2.8.0 ; python_full_version >= '3.10'
# via pymongo
docker==7.1.0
# via glances
elastic-transport==9.1.0 ; python_full_version < '3.10'
# via elasticsearch
elastic-transport==9.2.0 ; python_full_version >= '3.10'
# via elasticsearch
elasticsearch==9.1.2 ; python_full_version < '3.10'
# via glances
elasticsearch==9.2.0 ; python_full_version >= '3.10'
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via
# anyio
# pytest
fastapi==0.120.4
# via glances
geomet==1.1.0
# via cassandra-driver
graphitesender==0.11.2
# via glances
h11==0.16.0
# via uvicorn
ibm-cloud-sdk-core==3.24.2
# via ibmcloudant
ibmcloudant==0.11.0
# via glances
idna==3.11
# via
# anyio
# requests
ifaddr==0.2.0
# via zeroconf
importlib-metadata==7.1.0 ; python_full_version < '3.10'
# via pygal
importlib-metadata==8.7.0 ; python_full_version >= '3.10'
# via pygal
influxdb==5.3.2
# via glances
influxdb-client==1.49.0
# via glances
influxdb3-python==0.16.0
# via glances
iniconfig==2.1.0 ; python_full_version < '3.10'
# via pytest
iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
jinja2==3.1.6
# via
# glances
# pysmi-lextudio
kafka-python==2.2.15
# via glances
markupsafe==3.0.3
# via jinja2
msgpack==1.1.2
# via influxdb
netifaces2==0.0.22
# via glances
nvidia-ml-py==13.580.82
# via glances
packaging==25.0
# via
# glances
# pytest
paho-mqtt==2.1.0
# via glances
pbkdf2==1.3
# via wifi
pika==1.3.2
# via glances
pluggy==1.6.0
# via pytest
ply==3.11
# via pysmi-lextudio
podman==5.6.0
# via glances
potsdb==1.0.3
# via glances
prometheus-client==0.23.1
# via glances
protobuf==4.25.8 ; python_full_version < '3.10'
# via bernhard
protobuf==6.33.0 ; python_full_version >= '3.10'
# via bernhard
psutil==7.1.2
# via glances
psycopg==3.2.12
# via glances
psycopg-binary==3.2.12 ; implementation_name != 'pypy'
# via psycopg
pyarrow==21.0.0 ; python_full_version < '3.10'
# via influxdb3-python
pyarrow==22.0.0 ; python_full_version >= '3.10'
# via influxdb3-python
pyasn1==0.6.0
# via pysnmp-lextudio
pycparser==2.23 ; (implementation_name != 'PyPy' and platform_python_implementation != 'PyPy') or (implementation_name == 'pypy' and platform_python_implementation == 'PyPy')
# via cffi
pydantic==2.12.3
# via fastapi
pydantic-core==2.41.4
# via pydantic
pygal==3.0.5
# via glances
pygments==2.19.2
# via pytest
pyjwt==2.10.1
# via
# ibm-cloud-sdk-core
# ibmcloudant
pymdstat==0.4.3
# via glances
pymongo==4.15.3
# via glances
pysmart-smartx==0.3.10
# via glances
pysmi-lextudio==1.4.3
# via pysnmp-lextudio
pysnmp-lextudio==6.3.0
# via glances
pysnmpcrypto==0.0.4
# via pysnmp-lextudio
pytest==8.4.2
# via pytest-cov
pytest-cov==4.1.0
# via pysnmp-lextudio
python-dateutil==2.9.0.post0
# via
# elasticsearch
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# influxdb-client
# influxdb3-python
pytz==2025.2
# via influxdb
pywin32==311 ; sys_platform == 'win32'
# via docker
pyzmq==27.1.0
# via glances
reactivex==4.0.4
# via
# influxdb-client
# influxdb3-python
requests==2.32.5
# via
# docker
# glances
# ibm-cloud-sdk-core
# ibmcloudant
# influxdb
# podman
# pysmi-lextudio
setuptools==80.9.0
# via
# influxdb-client
# wifi
shtab==1.7.2 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# influxdb
# python-dateutil
sniffio==1.3.1
# via
# anyio
# elastic-transport
# elasticsearch
sparklines==0.7.0
# via glances
starlette==0.49.2
# via fastapi
statsd==4.0.1
# via glances
termcolor==3.1.0 ; python_full_version < '3.10'
# via sparklines
termcolor==3.2.0 ; python_full_version >= '3.10'
# via sparklines
tomli==2.0.2 ; python_full_version <= '3.11'
# via
# coverage
# podman
# pytest
typing-extensions==4.15.0
# via
# anyio
# cryptography
# elasticsearch
# fastapi
# psycopg
# pydantic
# pydantic-core
# reactivex
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
tzdata==2025.2 ; sys_platform == 'win32'
# via psycopg
urllib3==2.5.0
# via
# docker
# elastic-transport
# ibm-cloud-sdk-core
# influxdb-client
# influxdb3-python
# podman
# requests
uvicorn==0.38.0
# via glances
wifi==0.3.8
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances
zeroconf==0.148.0
# via glances
zipp==3.23.0
# via importlib-metadata

View File

@ -49,7 +49,7 @@ history_size=1200
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -181,6 +181,8 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
@ -300,15 +302,32 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=True
#dm-0_rx_log=False
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=True
#dm-0_tx_log=False
[fs]
disable=False
@ -384,8 +403,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -431,6 +450,8 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disabled: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -526,8 +547,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -605,6 +626,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created

View File

@ -1,20 +1,483 @@
codespell
coverage
fonttools>=4.43.0 # not directly required, pinned by Snyk to avoid a vulnerability
gprof2dot
matplotlib
memory-profiler
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
pillow>=10.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
pre-commit
py-spy
pyright
pytest
requirements-parser
rstcheck
ruff
selenium
semgrep; platform_system == 'Linux'
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
webdriver-manager
h11>=0.16.0 # not directly required, pinned by Snyk to avoid a vulnerability
# This file was autogenerated by uv via the following command:
# uv export --no-hashes --only-dev --output-file dev-requirements.txt
alabaster==0.7.16 ; python_full_version < '3.10'
# via sphinx
alabaster==1.0.0 ; python_full_version >= '3.10'
# via sphinx
annotated-types==0.7.0
# via pydantic
anyio==4.11.0 ; python_full_version >= '3.10'
# via
# httpx
# mcp
# sse-starlette
# starlette
attrs==25.4.0
# via
# glom
# jsonschema
# outcome
# referencing
# reuse
# semgrep
# trio
babel==2.17.0
# via sphinx
binaryornot==0.4.4 ; python_full_version < '3.10'
# via reuse
boltons==21.0.0
# via
# face
# glom
# semgrep
boolean-py==5.0
# via
# license-expression
# reuse
bracex==2.6
# via wcmatch
certifi==2025.10.5
# via
# httpcore
# httpx
# requests
# selenium
cffi==2.0.0 ; implementation_name != 'pypy' and os_name == 'nt'
# via trio
cfgv==3.4.0
# via pre-commit
chardet==5.2.0 ; python_full_version < '3.10'
# via binaryornot
charset-normalizer==3.4.4
# via
# python-debian
# requests
click==8.1.8
# via
# click-option-group
# reuse
# semgrep
# typer
# uvicorn
click-option-group==0.5.9
# via semgrep
codespell==2.4.1
colorama==0.4.6
# via
# click
# pytest
# semgrep
# sphinx
contourpy==1.3.0 ; python_full_version < '3.10'
# via matplotlib
contourpy==1.3.2 ; python_full_version == '3.10.*'
# via matplotlib
contourpy==1.3.3 ; python_full_version >= '3.11'
# via matplotlib
cycler==0.12.1
# via matplotlib
defusedxml==0.7.1 ; python_full_version < '3.10'
# via semgrep
deprecated==1.3.1 ; python_full_version < '3.10'
# via
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
distlib==0.4.0
# via virtualenv
docutils==0.21.2
# via
# rstcheck-core
# sphinx
# sphinx-rtd-theme
exceptiongroup==1.2.2
# via
# anyio
# pytest
# semgrep
# trio
# trio-websocket
face==24.0.0
# via glom
filelock==3.19.1 ; python_full_version < '3.10'
# via virtualenv
filelock==3.20.0 ; python_full_version >= '3.10'
# via virtualenv
fonttools==4.60.1
# via matplotlib
glom==22.1.0
# via semgrep
googleapis-common-protos==1.71.0
# via opentelemetry-exporter-otlp-proto-http
gprof2dot==2025.4.14
h11==0.16.0
# via
# httpcore
# uvicorn
# wsproto
httpcore==1.0.9 ; python_full_version >= '3.10'
# via httpx
httpx==0.28.1 ; python_full_version >= '3.10'
# via mcp
httpx-sse==0.4.3 ; python_full_version >= '3.10'
# via mcp
identify==2.6.15
# via pre-commit
idna==3.11
# via
# anyio
# httpx
# requests
# trio
imagesize==1.4.1
# via sphinx
importlib-metadata==7.1.0 ; python_full_version < '3.10'
# via
# opentelemetry-api
# sphinx
importlib-metadata==8.7.0 ; python_full_version >= '3.10'
# via opentelemetry-api
importlib-resources==6.5.2 ; python_full_version < '3.10'
# via matplotlib
iniconfig==2.1.0 ; python_full_version < '3.10'
# via pytest
iniconfig==2.3.0 ; python_full_version >= '3.10'
# via pytest
jinja2==3.1.6
# via
# reuse
# sphinx
jsonschema==4.25.1
# via
# mcp
# semgrep
jsonschema-specifications==2025.9.1
# via jsonschema
kiwisolver==1.4.7 ; python_full_version < '3.10'
# via matplotlib
kiwisolver==1.4.9 ; python_full_version >= '3.10'
# via matplotlib
license-expression==30.4.4
# via reuse
markdown-it-py==3.0.0 ; python_full_version < '3.10'
# via rich
markdown-it-py==4.0.0 ; python_full_version >= '3.10'
# via rich
markupsafe==3.0.3
# via jinja2
matplotlib==3.9.4 ; python_full_version < '3.10'
matplotlib==3.10.7 ; python_full_version >= '3.10'
mcp==1.16.0 ; python_full_version >= '3.10'
# via semgrep
mdurl==0.1.2
# via markdown-it-py
memory-profiler==0.61.0
nodeenv==1.9.1
# via
# pre-commit
# pyright
numpy==2.0.2 ; python_full_version < '3.10'
# via
# contourpy
# matplotlib
numpy==2.2.6 ; python_full_version == '3.10.*'
# via
# contourpy
# matplotlib
numpy==2.3.4 ; python_full_version >= '3.11'
# via
# contourpy
# matplotlib
opentelemetry-api==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-api==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# semgrep
opentelemetry-exporter-otlp-proto-common==1.25.0 ; python_full_version < '3.10'
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-common==1.37.0 ; python_full_version >= '3.10'
# via opentelemetry-exporter-otlp-proto-http
opentelemetry-exporter-otlp-proto-http==1.25.0 ; python_full_version < '3.10'
# via semgrep
opentelemetry-exporter-otlp-proto-http==1.37.0 ; python_full_version >= '3.10'
# via semgrep
opentelemetry-instrumentation==0.46b0 ; python_full_version < '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation==0.58b0 ; python_full_version >= '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-instrumentation-requests==0.46b0 ; python_full_version < '3.10'
# via semgrep
opentelemetry-instrumentation-requests==0.58b0 ; python_full_version >= '3.10'
# via semgrep
opentelemetry-proto==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-proto==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-common
# opentelemetry-exporter-otlp-proto-http
opentelemetry-sdk==1.25.0 ; python_full_version < '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-sdk==1.37.0 ; python_full_version >= '3.10'
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
opentelemetry-semantic-conventions==0.46b0 ; python_full_version < '3.10'
# via
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-semantic-conventions==0.58b0 ; python_full_version >= '3.10'
# via
# opentelemetry-instrumentation
# opentelemetry-instrumentation-requests
# opentelemetry-sdk
opentelemetry-util-http==0.46b0 ; python_full_version < '3.10'
# via opentelemetry-instrumentation-requests
opentelemetry-util-http==0.58b0 ; python_full_version >= '3.10'
# via opentelemetry-instrumentation-requests
outcome==1.3.0.post0
# via
# trio
# trio-websocket
packaging==25.0
# via
# matplotlib
# opentelemetry-instrumentation
# pytest
# requirements-parser
# semgrep
# sphinx
# webdriver-manager
peewee==3.18.2
# via semgrep
pillow==11.3.0 ; python_full_version < '3.10'
# via matplotlib
pillow==12.0.0 ; python_full_version >= '3.10'
# via matplotlib
platformdirs==4.4.0 ; python_full_version < '3.10'
# via virtualenv
platformdirs==4.5.0 ; python_full_version >= '3.10'
# via virtualenv
pluggy==1.6.0
# via pytest
pre-commit==4.3.0
# via
# googleapis-common-protos
# opentelemetry-proto
protobuf==6.33.0 ; python_full_version >= '3.10'
protobuf==4.25.8 ; python_full_version < '3.10'
# via
# googleapis-common-protos
# opentelemetry-proto
psutil==7.1.2
# via memory-profiler
py-spy==0.4.1
pycparser==2.23 ; implementation_name != 'PyPy' and implementation_name != 'pypy' and os_name == 'nt'
# via cffi
pydantic==2.12.3
# via
# mcp
# pydantic-settings
# rstcheck-core
pydantic-core==2.41.4
# via pydantic
pydantic-settings==2.11.0 ; python_full_version >= '3.10'
# via mcp
pygments==2.19.2
# via
# pytest
# rich
# sphinx
pyinstrument==5.1.1
pyparsing==3.2.5
# via matplotlib
pyright==1.1.407
pysocks==1.7.1
# via urllib3
pytest==8.4.2
python-dateutil==2.9.0.post0
# via matplotlib
python-debian==1.0.1
# via reuse
python-dotenv==1.2.1
# via
# pydantic-settings
# webdriver-manager
python-magic==0.4.27 ; python_full_version >= '3.10'
# via reuse
python-multipart==0.0.20 ; python_full_version >= '3.10'
# via mcp
pywin32==311 ; python_full_version >= '3.10' and sys_platform == 'win32'
# via
# mcp
# semgrep
pyyaml==6.0.3
# via pre-commit
referencing==0.36.2 ; python_full_version < '3.10'
# via
# jsonschema
# jsonschema-specifications
referencing==0.37.0 ; python_full_version >= '3.10'
# via
# jsonschema
# jsonschema-specifications
requests==2.32.5
# via
# opentelemetry-exporter-otlp-proto-http
# semgrep
# sphinx
# webdriver-manager
requirements-parser==0.13.0
reuse==5.1.1 ; python_full_version < '3.10'
reuse==6.2.0 ; python_full_version >= '3.10'
rich==13.5.3
# via
# semgrep
# typer
roman-numerals-py==3.1.0 ; python_full_version >= '3.11'
# via sphinx
rpds-py==0.27.1 ; python_full_version < '3.10'
# via
# jsonschema
# referencing
rpds-py==0.28.0 ; python_full_version >= '3.10'
# via
# jsonschema
# referencing
rstcheck==6.2.5
rstcheck-core==1.2.2
# via rstcheck
ruamel-yaml==0.18.16
# via semgrep
ruamel-yaml-clib==0.2.14 ; python_full_version >= '3.10' or platform_python_implementation == 'CPython'
# via
# ruamel-yaml
# semgrep
ruff==0.14.3
selenium==4.36.0 ; python_full_version < '3.10'
selenium==4.38.0 ; python_full_version >= '3.10'
semgrep==1.136.0 ; python_full_version < '3.10'
semgrep==1.142.0 ; python_full_version >= '3.10'
setuptools==80.9.0
# via opentelemetry-instrumentation
shellingham==1.5.4
# via typer
six==1.17.0
# via python-dateutil
sniffio==1.3.1
# via
# anyio
# trio
snowballstemmer==3.0.1
# via sphinx
sortedcontainers==2.4.0
# via trio
sphinx==7.4.7 ; python_full_version < '3.10'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.1.3 ; python_full_version == '3.10.*'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx==8.2.3 ; python_full_version >= '3.11'
# via
# sphinx-rtd-theme
# sphinxcontrib-jquery
sphinx-rtd-theme==3.0.2
sphinxcontrib-applehelp==2.0.0
# via sphinx
sphinxcontrib-devhelp==2.0.0
# via sphinx
sphinxcontrib-htmlhelp==2.1.0
# via sphinx
sphinxcontrib-jquery==4.1
# via sphinx-rtd-theme
sphinxcontrib-jsmath==1.0.1
# via sphinx
sphinxcontrib-qthelp==2.0.0
# via sphinx
sphinxcontrib-serializinghtml==2.0.0
# via sphinx
sse-starlette==3.0.3 ; python_full_version >= '3.10'
# via mcp
starlette==0.49.2 ; python_full_version >= '3.10'
# via mcp
tomli==2.0.2
# via
# pytest
# semgrep
# sphinx
tomlkit==0.13.3
# via reuse
trio==0.31.0 ; python_full_version < '3.10'
# via
# selenium
# trio-websocket
trio==0.32.0 ; python_full_version >= '3.10'
# via
# selenium
# trio-websocket
trio-websocket==0.12.2
# via selenium
typer==0.20.0
# via rstcheck
typing-extensions==4.15.0
# via
# anyio
# opentelemetry-api
# opentelemetry-exporter-otlp-proto-http
# opentelemetry-sdk
# opentelemetry-semantic-conventions
# pydantic
# pydantic-core
# pyright
# referencing
# selenium
# semgrep
# starlette
# typer
# typing-inspection
# uvicorn
# virtualenv
typing-inspection==0.4.2
# via
# pydantic
# pydantic-settings
urllib3==2.5.0
# via
# requests
# selenium
# semgrep
uvicorn==0.38.0 ; python_full_version >= '3.10' and sys_platform != 'emscripten'
# via mcp
virtualenv==20.35.4
# via pre-commit
wcmatch==8.5.2
# via semgrep
webdriver-manager==4.0.2
websocket-client==1.9.0
# via selenium
wrapt==1.17.3
# via
# deprecated
# opentelemetry-instrumentation
wsproto==1.2.0
# via trio-websocket
zipp==3.23.0
# via
# importlib-metadata
# importlib-resources

View File

@ -1,7 +0,0 @@
psutil
defusedxml
orjson
reuse
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
sphinx
sphinx_rtd_theme

View File

@ -12,6 +12,9 @@ services:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
- "/run/user/1000/podman/podman.sock:/run/user/1000/podman/podman.sock:ro"
- "./glances.conf:/glances/conf/glances.conf"
# # Uncomment for proper distro information in the upper panel.
# # Works only for distros that do have this file (most distros do).
# - "/etc/os-release:/etc/os-release:ro"
environment:
- TZ=${TZ}
- GLANCES_OPT=-C /glances/conf/glances.conf -w

View File

@ -49,7 +49,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.
@ -181,6 +181,8 @@ temperature_critical=80
[mem]
disable=False
# Display available memory instead of used memory
#available=True
# Define RAM thresholds in %
# Default values if not defined: 50/70/90
careful=50
@ -300,15 +302,32 @@ hide_zero=False
#show=sda.*
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set latency thresholds (latency in ms) for a given disk name (rx = read / tx = write)
# dm-0_rx_latency_careful=10
# dm-0_rx_latency_warning=20
# dm-0_rx_latency_critical=50
# dm-0_rx_latency_log=False
# dm-0_tx_latency_careful=10
# dm-0_tx_latency_warning=20
# dm-0_tx_latency_critical=50
# dm-0_tx_latency_log=False
# There are no default bitrate thresholds for disks (because it is not possible to know the disk speed)
# Set bitrate thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
#dm-0_rx_careful=4000000000
#dm-0_rx_warning=5000000000
#dm-0_rx_critical=6000000000
#dm-0_rx_log=True
#dm-0_rx_log=False
#dm-0_tx_careful=700000000
#dm-0_tx_warning=900000000
#dm-0_tx_critical=1000000000
#dm-0_tx_log=True
#dm-0_tx_log=False
[fs]
disable=False
@ -384,8 +403,8 @@ port=7634
# Documentation: https://glances.readthedocs.io/en/latest/aoa/sensors.html
disable=False
# Set the refresh multiplicator for the sensors
# By default refresh every Glances refresh * 3 (increase to reduce CPU consumption)
#refresh=3
# By default refresh every Glances refresh * 5 (increase to reduce CPU consumption)
#refresh=5
# Hide some sensors (comma separated list of regexp)
hide=unknown.*
# Show only the following sensors (comma separated list of regexp)
@ -431,6 +450,8 @@ disable=False
# Stats that can be disabled: cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Stats that can not be disabled: pid,name
#disable_stats=cpu_percent,memory_info,memory_percent,username,cpu_times,num_threads,nice,status,io_counters,cmdline
# Disable display of virtual memory
#disable_virtual_memory=True
# Define CPU/MEM (per process) thresholds in %
# Default values if not defined: 50/70/90
cpu_careful=50
@ -510,7 +531,8 @@ port_default_gateway=False
disable=True
# Define the maximum VMs size name (default is 20 chars)
max_name_size=20
# By default, Glances only display running VMs with states: 'Running', 'Starting' or 'Restarting'
# By default, Glances only display running VMs with states:
# 'Running', 'Paused', 'Starting' or 'Restarting'
# Set the following key to True to display all VMs regarding their states
all=False
@ -525,8 +547,8 @@ disable=False
# Define the maximum docker size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
; disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Thresholds for CPU and MEM (in %)
; cpu_careful=50
; cpu_warning=70
@ -604,6 +626,11 @@ disable=False
# Exports
##############################################################################
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
#exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
[graph]
# Configuration for the --export graph option
# Set the path where the graph (.svg files) will be created

View File

@ -66,7 +66,7 @@ RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --upgrade pip
RUN python${PYTHON_VERSION} -m venv --without-pip venv
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -74,9 +74,7 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
-r docker-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -89,8 +87,7 @@ ARG CASS_DRIVER_NO_CYTHON=1
ARG CARGO_NET_GIT_FETCH_WITH_CLI=true
RUN /venv-build/bin/python${PYTHON_VERSION} -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r optional-requirements.txt
-r all-requirements.txt
##############################################################################
# RELEASE Stages

View File

@ -55,7 +55,7 @@ RUN apt-get clean \
RUN python3 -m venv --without-pip venv
COPY requirements.txt docker-requirements.txt webui-requirements.txt optional-requirements.txt ./
COPY pyproject.toml docker-requirements.txt all-requirements.txt ./
##############################################################################
# BUILD: Install the minimal image deps
@ -63,9 +63,7 @@ FROM build AS buildminimal
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r docker-requirements.txt \
-r webui-requirements.txt
-r docker-requirements.txt
##############################################################################
# BUILD: Install all the deps
@ -73,8 +71,7 @@ FROM build AS buildfull
ARG PYTHON_VERSION
RUN python3 -m pip install --target="/venv/lib/python${PYTHON_VERSION}/site-packages" \
-r requirements.txt \
-r optional-requirements.txt
-r all-requirements.txt
##############################################################################
# RELEASE Stages

View File

@ -1,10 +1,85 @@
# install with base requirements file
-r requirements.txt
docker>=6.1.1
orjson # JSON Serialization speedup
podman
python-dateutil
requests
six
urllib3
# This file was autogenerated by uv via the following command:
# uv export --no-emit-workspace --no-hashes --no-group dev --extra containers --extra web --output-file docker-requirements.txt
annotated-doc==0.0.3
# via fastapi
annotated-types==0.7.0
# via pydantic
anyio==4.11.0
# via starlette
certifi==2025.10.5
# via requests
charset-normalizer==3.4.4
# via requests
click==8.1.8
# via uvicorn
colorama==0.4.6 ; sys_platform == 'win32'
# via click
defusedxml==0.7.1
# via glances
docker==7.1.0
# via glances
exceptiongroup==1.2.2 ; python_full_version < '3.11'
# via anyio
fastapi==0.120.4
# via glances
h11==0.16.0
# via uvicorn
idna==3.11
# via
# anyio
# requests
jinja2==3.1.6
# via glances
markupsafe==3.0.3
# via jinja2
packaging==25.0
# via glances
podman==5.6.0
# via glances
psutil==7.1.2
# via glances
pydantic==2.12.3
# via fastapi
pydantic-core==2.41.4
# via pydantic
python-dateutil==2.9.0.post0
# via glances
pywin32==311 ; sys_platform == 'win32'
# via docker
requests==2.32.5
# via
# docker
# glances
# podman
shtab==1.7.2 ; sys_platform != 'win32'
# via glances
six==1.17.0
# via
# glances
# python-dateutil
sniffio==1.3.1
# via anyio
starlette==0.49.2
# via fastapi
tomli==2.0.2 ; python_full_version < '3.11'
# via podman
typing-extensions==4.15.0
# via
# anyio
# fastapi
# pydantic
# pydantic-core
# starlette
# typing-inspection
# uvicorn
typing-inspection==0.4.2
# via pydantic
urllib3==2.5.0
# via
# docker
# podman
# requests
uvicorn==0.38.0
# via glances
windows-curses==2.4.1 ; sys_platform == 'win32'
# via glances

View File

@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = ../venv/bin/sphinx-build
SPHINXBUILD = ../.venv/bin/sphinx-build
PAPER =
BUILDDIR = _build

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 256 KiB

After

Width:  |  Height:  |  Size: 203 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 47 KiB

After

Width:  |  Height:  |  Size: 34 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 51 KiB

After

Width:  |  Height:  |  Size: 33 KiB

BIN
docs/_static/screenshot-fetch.png vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 83 KiB

View File

@ -63,7 +63,7 @@ Within ``/etc/glances/actions.d/fs-critical.py``:
.. note::
You can use all the stats for the current plugin. See
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFULL-JSON-API
https://github.com/nicolargo/glances/wiki/The-Glances-RESTFUL-JSON-API
for the stats list.
It is also possible to repeat action until the end of the alert.

View File

@ -32,8 +32,8 @@ under the ``[containers]`` section:
# Define the maximum containers size name (default is 20 chars)
max_name_size=20
# List of stats to disable (not display)
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,command
disable_stats=diskio,networkio
# Following stats can be disabled: name,status,uptime,cpu,mem,diskio,networkio,ports,command
disable_stats=command
# Global containers' thresholds for CPU and MEM (in %)
cpu_careful=50
cpu_warning=70

View File

@ -5,17 +5,12 @@ Disk I/O
.. image:: ../_static/diskio.png
Glances displays the disk I/O throughput. The unit is adapted
dynamically.
You can display:
Glances displays the disk I/O throughput, count and mean latency:
- bytes per second (default behavior / Bytes/s, KBytes/s, MBytes/s, etc)
- requests per second (using --diskio-iops option or *B* hotkey)
- mean latency (using --diskio-latency option or *L* hotkey)
There is no alert on this information.
It's possible to define:
It's also possible to define:
- a list of disk to show (white list)
- a list of disks to hide
@ -42,13 +37,20 @@ Filtering is based on regular expression. Please be sure that your regular
expression works as expected. You can use an online tool like `regex101`_ in
order to test your regular expression.
It is also possible to define thresholds for bytes read and write per second:
It is also possible to define thresholds for latency and bytes read and write per second:
.. code-block:: ini
[diskio]
# Alias for sda1 and sdb1
#alias=sda1:SystemDisk,sdb1:DataDisk
# Default latency thresholds (in ms) (rx = read / tx = write)
rx_latency_careful=10
rx_latency_warning=20
rx_latency_critical=50
tx_latency_careful=10
tx_latency_warning=20
tx_latency_critical=50
# Set thresholds (in bytes per second) for a given disk name (rx = read / tx = write)
dm-0_rx_careful=4000000000
dm-0_rx_warning=5000000000

View File

@ -35,6 +35,11 @@ system:
[fs]
allow=shm
With the above configuration key, it is also possible to monitor NFS
mount points (allow=nfs). Be aware that this can slow down the
performance of the plugin if the NFS server is not reachable. In this
case, the plugin will wait for a 2-second timeout.
Also, you can hide mount points using regular expressions.
To hide all mount points starting with /boot and /snap:

View File

@ -27,7 +27,7 @@ Stats description:
is in RAM.
- **inactive**: (UNIX): memory that is marked as not used.
- **buffers**: (Linux, BSD): cache for things like file system metadata.
- **cached**: (Linux, BSD): cache for various things.
- **cached**: (Linux, BSD): cache for various things (including ZFS cache).
Additional stats available in through the API:
@ -41,6 +41,10 @@ Additional stats available in through the API:
- **shared**: (BSD): memory that may be simultaneously accessed by multiple
processes.
It is possible to display the available memory instead of the used memory
by setting the ``available`` option to ``True`` in the configuration file
under the ``[mem]`` section.
A character is also displayed just after the MEM header and shows the
trend value:

View File

@ -72,7 +72,7 @@ can also be used to set a threshold higher than zero.
.. code-block:: ini
[diskio]
[network]
hide_zero=True
hide_threshold_bytes=0

View File

@ -149,6 +149,18 @@ Columns display
pressing on the ``'/'`` key
========================= ==============================================
Disable display of virtual memory
---------------------------------
It's possible to disable the display of the VIRT column (virtual memory) by adding the
``disable_virtual_memory=True`` option in the ``[processlist]`` section of the configuration
file (glances.conf):
.. code-block:: ini
[processlist]
disable_virtual_memory=True
Process filtering
-----------------

View File

@ -33,6 +33,7 @@ thresholds (default behavior).
#temperature_core_careful=45
#temperature_core_warning=65
#temperature_core_critical=80
#alias=temp1:Motherboard 0,core 0:CPU Core 0
.. note 1::
The support for multiple batteries is only available if

1727
docs/api/python.rst Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -416,12 +416,18 @@ The following commands (key pressed) are supported while in Glances:
``F5`` or ``CTRL-R``
Refresh user interface
``LEFT``
``SHIFT-LEFT``
Navigation left through the process sort
``RIGHT``
``SHIFT-RIGHT``
Navigation right through the process sort
``LEFT``
Navigation left through the process name
``RIGHT``
Navigation right through the process name
``UP``
Up in the processes list

View File

@ -80,7 +80,7 @@ than a second one concerning the user interface:
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross-origin requests.

View File

@ -187,7 +187,7 @@ and make it visible to your container by adding it to ``docker-compose.yml`` as
image: nicolargo/glances:latest
restart: always
environment:
- GLANCES_OPT="-w --password"
- "GLANCES_OPT=-w --password"
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
# Uncomment the below line if you want glances to display host OS detail instead of container's

View File

@ -18,3 +18,13 @@ On ARM64, Docker needs to be configured to allow access to the memory stats.
Edit the /boot/firmware/cmdline.txt and add the following configuration key:
cgroup_enable=memory
Netifaces issue ?
-----------------
Previously, Glances uses Netifaces to get network interfaces information.
Now, Glances uses Netifaces2.
Please uninstall Netifaces and install Netifaces2 instead.

60
docs/fetch.rst Normal file
View File

@ -0,0 +1,60 @@
.. _fetch:
Fetch
=====
The fetch mode is used to get and share a quick look of a machine using the
``fetch`` option. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
The format of the template is based on the Jinja2 templating engine and can use all the stats
available in Glances through the ``gl`` variable (an instance of the :ref:`Glances Python API<api>`).
For example, the default template is defined as:
.. code-block:: jinja
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
✨ {{ gl.system['hostname'] }}{{ ' - ' + gl.ip['address'] if gl.ip['address'] else '' }}
⚙️ {{ gl.system['hr_name'] }} | Uptime: {{ gl.uptime }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }} |\
{{ '%0.2f'| format(gl.load['min5']) }} |\
{{ '%0.2f'| format(gl.load['min15']) }}
⚡ CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} /\
{{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}\
💾 {% if loop.index == 1 %}DISK{% else %} {% endif %} {{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} /\
{{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}\
{% for net in gl.network.keys() %}\
📡 {% if loop.index == 1 %}NET{% else %} {% endif %} ↓ {{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s \
↑ {{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}\
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}\
{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
⚡ {{ process['cpu_percent'] }}% CPU\
{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}\
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}\
{{ loop.index }}️⃣ {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM\
{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }}\
⚡ {{ process['cpu_percent'] }}% CPU
{% endfor %}\
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

110
docs/gw/duckdb.rst Normal file
View File

@ -0,0 +1,110 @@
.. _duckdb:
DuckDB
===========
DuckDB is an in-process SQL OLAP database management system.
You can export statistics to a ``DuckDB`` server.
The connection should be defined in the Glances configuration file as
following:
.. code-block:: ini
[duckdb]
# database defines where data are stored, can be one of:
# /path/to/glances.db (see https://duckdb.org/docs/stable/clients/python/dbapi#file-based-connection)
# :memory:glances (see https://duckdb.org/docs/stable/clients/python/dbapi#in-memory-connection)
# Or anyone else supported by the API (see https://duckdb.org/docs/stable/clients/python/dbapi)
database=/tmp/glances.db
and run Glances with:
.. code-block:: console
$ glances --export duckdb
Data model
-----------
The data model is composed of one table per Glances plugin.
Example:
.. code-block:: python
>>> import duckdb
>>> db = duckdb.connect(database='/tmp/glances.db', read_only=True)
>>> db.sql("SELECT * from cpu")
┌─────────────────────┬─────────────────┬────────┬────────┬────────┬───┬────────────────────┬─────────────────────┬──────────────────────┬──────────────────────┬──────────────────────┐
│ time │ hostname_id │ total │ user │ nice │ … │ cpu_iowait_warning │ cpu_iowait_critical │ cpu_ctx_switches_c… │ cpu_ctx_switches_w… │ cpu_ctx_switches_c… │
│ time with time zone │ varchar │ double │ double │ double │ │ double │ double │ double │ double │ double │
├─────────────────────┼─────────────────┼────────┼────────┼────────┼───┼────────────────────┼─────────────────────┼──────────────────────┼──────────────────────┼──────────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ 8.0 │ 5.6 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:27+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:29+00 │ nicolargo-xps15 │ 4.3 │ 3.2 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:31+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:33+00 │ nicolargo-xps15 │ 14.9 │ 15.7 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:35+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:37+00 │ nicolargo-xps15 │ 8.2 │ 7.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:39+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:41+00 │ nicolargo-xps15 │ 12.7 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:50:43+00 │ nicolargo-xps15 │ 12.2 │ 10.3 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:32+00 │ nicolargo-xps15 │ 10.1 │ 7.4 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:34+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:36+00 │ nicolargo-xps15 │ 6.6 │ 4.9 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:38+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:40+00 │ nicolargo-xps15 │ 9.9 │ 7.5 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:42+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:44+00 │ nicolargo-xps15 │ 4.0 │ 3.1 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:46+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
│ 11:51:48+00 │ nicolargo-xps15 │ 11.1 │ 8.8 │ 0.0 │ … │ 5.625 │ 6.25 │ 640000.0 │ 720000.0 │ 800000.0 │
├─────────────────────┴─────────────────┴────────┴────────┴────────┴───┴────────────────────┴─────────────────────┴──────────────────────┴──────────────────────┴──────────────────────┤
│ 41 rows (20 shown) 47 columns (10 shown) │
└──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
>>> db.sql("SELECT * from cpu").fetchall()[0]
(datetime.time(11, 50, 25, tzinfo=datetime.timezone.utc), 'nicolargo-xps15', 8.0, 5.6, 0.0, 2.3, 91.9, 0.1, 0.0, 0.0, 0.0, 0, 0, 0, 0, 16, 2.4103684425354004, 90724823, 0, 63323797, 0, 30704572, 0, 0, 0, 1200.0, 65.0, 75.0, 85.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, True, 50.0, 70.0, 90.0, 5.0, 5.625, 6.25, 640000.0, 720000.0, 800000.0)
>>> db.sql("SELECT * from network")
┌─────────────────────┬─────────────────┬────────────────┬────────────┬────────────┬───┬─────────────────────┬────────────────┬────────────────────┬────────────────────┬───────────────────┐
│ time │ hostname_id │ key_id │ bytes_sent │ bytes_recv │ … │ network_tx_critical │ network_hide │ network_hide_no_up │ network_hide_no_ip │ network_hide_zero │
│ time with time zone │ varchar │ varchar │ int64 │ int64 │ │ double │ varchar │ boolean │ boolean │ boolean │
├─────────────────────┼─────────────────┼────────────────┼────────────┼────────────┼───┼─────────────────────┼────────────────┼────────────────────┼────────────────────┼───────────────────┤
│ 11:50:25+00 │ nicolargo-xps15 │ interface_name │ 407761 │ 32730 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:27+00 │ nicolargo-xps15 │ interface_name │ 2877 │ 4857 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:29+00 │ nicolargo-xps15 │ interface_name │ 44504 │ 32555 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:31+00 │ nicolargo-xps15 │ interface_name │ 1092285 │ 48600 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:33+00 │ nicolargo-xps15 │ interface_name │ 150119 │ 43805 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:35+00 │ nicolargo-xps15 │ interface_name │ 34424 │ 14825 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:37+00 │ nicolargo-xps15 │ interface_name │ 19382 │ 33614 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:39+00 │ nicolargo-xps15 │ interface_name │ 53060 │ 39780 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:41+00 │ nicolargo-xps15 │ interface_name │ 371914 │ 78626 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:50:43+00 │ nicolargo-xps15 │ interface_name │ 82356 │ 60612 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │ · │
│ 11:51:29+00 │ nicolargo-xps15 │ interface_name │ 3766 │ 9977 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:32+00 │ nicolargo-xps15 │ interface_name │ 188036 │ 18668 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:34+00 │ nicolargo-xps15 │ interface_name │ 543 │ 2451 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:36+00 │ nicolargo-xps15 │ interface_name │ 8247 │ 7275 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:38+00 │ nicolargo-xps15 │ interface_name │ 7252 │ 986 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:40+00 │ nicolargo-xps15 │ interface_name │ 172 │ 132 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:42+00 │ nicolargo-xps15 │ interface_name │ 8080 │ 6640 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:44+00 │ nicolargo-xps15 │ interface_name │ 19660 │ 17830 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:46+00 │ nicolargo-xps15 │ interface_name │ 1007030 │ 84170 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
│ 11:51:48+00 │ nicolargo-xps15 │ interface_name │ 128947 │ 18087 │ … │ 90.0 │ [docker.*, lo] │ true │ true │ true │
├─────────────────────┴─────────────────┴────────────────┴────────────┴────────────┴───┴─────────────────────┴────────────────┴────────────────────┴────────────────────┴───────────────────┤
│ 41 rows (20 shown) 28 columns (10 shown) │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
.. _duckdb: https://duckdb.org/

View File

@ -3,8 +3,24 @@
Gateway To Other Services
=========================
Glances can exports stats to a CSV file. Also, it can act as a gateway
to providing stats to multiple services (see list below).
Glances can export stats to files or to other services like databases, message queues, etc.
Each exporter has its own configuration options, which can be set in the Glances
configuration file (`glances.conf`).
A common options section is also available. It provides the `exclude_fields`
option, which allows you to specify fields that should not be exported:
.. code-block:: ini
[export]
# Common section for all exporters
# Do not export following fields (comma separated list of regex)
exclude_fields=.*_critical,.*_careful,.*_warning,.*\.key$
This section describes the available exporters and how to configure them:
.. toctree::
:maxdepth: 2
@ -14,6 +30,7 @@ to providing stats to multiple services (see list below).
couchdb
elastic
graph
graphite
influxdb
json
kafka

View File

@ -40,7 +40,7 @@ be added as a column in the table (named key_id) and added to the timescaledb.se
Current limitations
-------------------
Sensors and Fs plugins are not supported by the TimescaleDB exporter.
Sensors, Fs and DiskIO plugins are not supported by the TimescaleDB exporter.
In the cpu plugin, the user field is exported as user_cpu (user_percpu in the percpu plugin)
because user is a reserved keyword in PostgreSQL.

View File

@ -11,12 +11,11 @@ information depending on the terminal size.
It can also work in client/server mode. Remote monitoring can be
done via terminal, Web interface, or API (XMLRPC and RESTful).
Glances is written in Python and uses the `psutil`_ library to get
information from your system.
Stats can also be exported to :ref:`files or external databases<gw>`.
Stats can also be exported to external time/value databases.
.. _psutil: https://github.com/giampaolo/psutil
It is also possible to use it in your own Python scripts thanks to
the :ref:`Glances API<api>` or in any other application through
the :ref:`RESTful API<api_restful>`.
Table of Contents
=================
@ -30,7 +29,11 @@ Table of Contents
config
aoa/index
gw/index
api
api/python
api/restful
docker
faq
support
.. _psutil: https://github.com/giampaolo/psutil

View File

@ -28,7 +28,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
.TH "GLANCES" "1" "Jul 09, 2025" "4.3.3" "Glances"
.TH "GLANCES" "1" "Nov 02, 2025" "4.4.0rc1" "Glances"
.SH NAME
glances \- An eye on your system
.SH SYNOPSIS
@ -522,12 +522,18 @@ Switch between process command line or command name
.B \fBF5\fP or \fBCTRL\-R\fP
Refresh user interface
.TP
.B \fBLEFT\fP
.B \fBSHIFT\-LEFT\fP
Navigation left through the process sort
.TP
.B \fBRIGHT\fP
.B \fBSHIFT\-RIGHT\fP
Navigation right through the process sort
.TP
.B \fBLEFT\fP
Navigation left through the process name
.TP
.B \fBRIGHT\fP
Navigation right through the process name
.TP
.B \fBUP\fP
Up in the processes list
.TP
@ -666,7 +672,7 @@ max_processes_display=25
# You can download it in a specific folder
# thanks to https://github.com/nicolargo/glances/issues/2021
# then configure this folder with the webui_root_path key
# Default is folder where glances_restfull_api.py is hosted
# Default is folder where glances_restful_api.py is hosted
#webui_root_path=
# CORS options
# Comma separated list of origins that should be permitted to make cross\-origin requests.

View File

@ -4,11 +4,12 @@ Quickstart
==========
This page gives a good introduction to how to get started with Glances.
Glances offers three modes:
Glances offers multiple modes:
- Standalone
- Client/Server
- Web server
- Fetch
Standalone Mode
---------------
@ -196,7 +197,7 @@ Here's a screenshot from Chrome on Android:
.. image:: _static/screenshot-web2.png
How do you protect your server (or Web server) with a login/password ?
------------------------------------------------------------------
----------------------------------------------------------------------
You can set a password to access the server using the ``--password``.
By default, the login is ``glances`` but you can change it with
@ -222,3 +223,22 @@ file:
# Additionally (and optionally) a default password could be defined
localhost=mylocalhostpassword
default=mydefaultpassword
Fetch mode
----------
It is also possible to get and share a quick look of a machine using the
``fetch`` mode. In this mode, current stats are displayed on the console in
a fancy way.
.. code-block:: console
$ glances --fetch
Results look like this:
.. image:: _static/screenshot-fetch.png
It is also possible to use a custom template with the ``--fetch-template </path/to/template.jinja>`` option.
Have a look to the :ref:`fetch documentation page<fetch>` to learn how to create your own template.

View File

@ -14,7 +14,7 @@ test_args = core.get_args()
app = GlancesRestfulApi(config=test_config, args=test_args)._app
with open('./docs/openapi.json', 'w') as f:
with open('./docs/api/openapi.json', 'w') as f:
json.dump(
get_openapi(
title=app.title,

13
generate_webui_conf.py Normal file
View File

@ -0,0 +1,13 @@
import json

from glances.outputs.glances_curses import _GlancesCurses

# Derive the default WebUI menu layout from the curses UI definition so the
# two front-ends stay in sync. The 'now' plugin is console-only and is
# therefore excluded from the left sidebar.
webui_menus = {
    "topMenu": list(_GlancesCurses._top),
    "leftMenu": [plugin for plugin in _GlancesCurses._left_sidebar if plugin != "now"],
}

print(json.dumps(webui_menus, indent=4))

View File

@ -19,7 +19,8 @@ import tracemalloc
# Global name
# Version should start and end with a numerical char
# See https://packaging.python.org/specifications/core-metadata/#version
__version__ = "4.3.3"
# Examples: 1.0.0, 1.0.0rc1, 1.1.0_dev1
__version__ = "4.4.0rc1"
__apiversion__ = '4'
__author__ = 'Nicolas Hennion <nicolas@nicolargo.com>'
__license__ = 'LGPLv3'
@ -52,10 +53,10 @@ if psutil_version_info < psutil_min_version:
# Trac malloc is only available on Python 3.4 or higher
def __signal_handler(signal, frame):
logger.debug(f"Signal {signal} caught")
def __signal_handler(sig, frame):
logger.debug(f"Signal {sig} caught")
# Avoid Glances hang when killing the process with multiple CTRL-C. See #3264
signal.signal(signal.SIGINT, signal.SIG_IGN)
end()
@ -95,8 +96,8 @@ def check_memleak(args, mode):
def setup_server_mode(args, mode):
if args.stdout_issue or args.stdout_apidoc:
# Serve once for issue/test mode
if args.stdout_issue or args.stdout_api_restful_doc or args.stdout_api_doc:
# Serve once for issue and API documentation modes
mode.serve_issue()
else:
# Serve forever
@ -104,18 +105,18 @@ def setup_server_mode(args, mode):
def maybe_trace_memleak(args, snapshot_begin):
if args.memory_leak:
if args.trace_malloc or args.memory_leak:
snapshot_end = tracemalloc.take_snapshot()
if args.memory_leak:
snapshot_diff = snapshot_end.compare_to(snapshot_begin, 'filename')
memory_leak = sum([s.size_diff for s in snapshot_diff])
print(f"Memory consumption: {memory_leak / 1000:.1f}KB (see log for details)")
logger.info("Memory consumption (top 5):")
for stat in snapshot_diff[:5]:
logger.info(stat)
elif args.trace_malloc:
if args.trace_malloc:
# See more options here: https://docs.python.org/3/library/tracemalloc.html
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics("filename")
top_stats = snapshot_end.statistics("filename")
print("[ Trace malloc - Top 10 ]")
for stat in top_stats[:10]:
print(stat)

117
glances/api.py Normal file
View File

@ -0,0 +1,117 @@
#
# Glances - An eye on your system
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
from glances import __version__ as glances_version
from glances.globals import auto_unit, weak_lru_cache
from glances.main import GlancesMain
from glances.outputs.glances_bars import Bar
from glances.processes import sort_stats
from glances.stats import GlancesStats
plugin_dependencies_tree = {
'processlist': ['processcount'],
}
class GlancesAPI:
ttl = 2.0 # Default cache TTL in seconds
def __init__(self, config=None, args=None, args_begin_at=1):
self.__version__ = glances_version.split('.')[0] # Get the major version
core = GlancesMain(args_begin_at)
self.args = args if args is not None else core.get_args()
self.config = config if config is not None else core.get_config()
self._stats = GlancesStats(config=self.config, args=self.args)
# Set the cache TTL for the API
self.ttl = self.args.time if self.args.time is not None else self.ttl
# Init the stats of all plugins in order to ensure that rate are computed
self._stats.update()
@weak_lru_cache(maxsize=1, ttl=ttl)
def __getattr__(self, item):
"""Fallback to the stats object for any missing attributes."""
if item in self._stats.getPluginsList():
if item in plugin_dependencies_tree:
# Ensure dependencies are updated before accessing the plugin
for dependency in plugin_dependencies_tree[item]:
self._stats.get_plugin(dependency).update()
# Update the plugin stats
self._stats.get_plugin(item).update()
return self._stats.get_plugin(item)
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{item}'")
def plugins(self):
"""Return the list of available plugins."""
return self._stats.getPluginsList()
def auto_unit(self, number, low_precision=False, min_symbol='K', none_symbol='-'):
"""
Converts a numeric value into a human-readable string with appropriate units.
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
"""
return auto_unit(number, low_precision, min_symbol, none_symbol)
def bar(self, value, size=18, bar_char='', empty_char='', pre_char='', post_char=''):
"""
Generate a progress bar representation for a given value.
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
"""
b = Bar(
size, bar_char=bar_char, empty_char=empty_char, pre_char=pre_char, post_char=post_char, display_value=False
)
b.percent = value
return b.get()
def top_process(self, limit=3, sorted_by='cpu_percent', sorted_by_secondary='memory_percent'):
    """Return the top processes sorted by the given criteria.

    Args:
        limit (int, optional): Maximum number of processes returned. Defaults to 3.
        sorted_by (str, optional): Primary sort key (e.g. 'cpu_percent').
            Defaults to 'cpu_percent'.
        sorted_by_secondary (str, optional): Secondary sort key used to break
            ties on the primary key. Defaults to 'memory_percent'.

    Returns:
        list: Top process dicts, excluding any process whose command line
        matches 'glances'.

    Note:
        Glances itself is filtered out because in fetch mode it generates
        CPU load that would pollute the ranking.
    """
    raw_processes = self._stats.get_plugin('processlist').get_raw()
    # Drop Glances' own process from the candidates (self-generated load)
    candidates = [
        proc
        for proc in raw_processes
        if proc['cmdline'] and 'glances' not in (proc['cmdline'] or ())
    ]
    ranked = sort_stats(candidates, sorted_by=sorted_by, sorted_by_secondary=sorted_by_secondary)
    return ranked[:limit]

View File

@ -280,8 +280,8 @@ class GlancesClient:
else:
# In quiet mode, we only wait adapted_refresh seconds
time.sleep(adapted_refresh)
except Exception as e:
logger.critical(f"Critical error in client serve_forever loop: {e}")
except Exception:
logger.critical("Critical error in client serve_forever loop")
self.end()
return self.client_mode

View File

@ -11,6 +11,8 @@ I am your father...
...for all Glances exports IF.
"""
import re
from glances.globals import NoOptionError, NoSectionError, json_dumps
from glances.logger import logger
from glances.timer import Counter
@ -53,6 +55,10 @@ class GlancesExport:
# Fields description
self._fields_description = None
# Load the default common export configuration
if self.config is not None:
self.load_common_conf()
def _log_result_decorator(fct):
"""Log (DEBUG) the result of the function fct."""
@ -71,6 +77,24 @@ class GlancesExport:
"""Close the export module."""
logger.debug(f"Finalise export interface {self.export_name}")
def load_common_conf(self):
    """Load the common [export] section from the Glances configuration file.

    Reads the 'exclude_fields' option (a list of regular expressions matched
    against exported field names). The attribute is always defined afterwards
    (None when the option or the section is missing) so later readers such as
    is_excluded() cannot hit an AttributeError.

    :returns: Boolean -- True (the common configuration was processed)
    """
    # Read the common [export] section
    section = "export"
    opt = "exclude_fields"
    try:
        setattr(self, opt, self.config.get_list_value(section, opt))
    except (NoOptionError, NoSectionError):
        # Also catch NoSectionError: a config file without an [export]
        # section must not abort the export module initialisation.
        # Default the attribute so it is always defined.
        setattr(self, opt, None)
        logger.debug(f"{opt} option not found in the {section} configuration section")
    logger.debug(f"Load common {section} from the Glances configuration file")
    return True
def load_conf(self, section, mandatories=["host", "port"], options=None):
"""Load the export <section> configuration in the Glances configuration file.
@ -101,7 +125,7 @@ class GlancesExport:
try:
setattr(self, opt, self.config.get_value(section, opt))
except NoOptionError:
pass
logger.debug(f"{opt} option not found in the {section} configuration section")
logger.debug(f"Load {section} from the Glances configuration file")
logger.debug(f"{section} parameters: { ({opt: getattr(self, opt) for opt in mandatories + options}) }")
@ -128,7 +152,7 @@ class GlancesExport:
d_tags = {}
if tags:
try:
d_tags = dict([x.split(":") for x in tags.split(",")])
d_tags = dict(x.split(":", 1) for x in tags.split(","))
except ValueError:
# one of the 'key:value' pairs was missing
logger.info("Invalid tags passed: %s", tags)
@ -199,6 +223,10 @@ class GlancesExport:
ret.append({"measurement": name, "tags": tags, "fields": fields})
return ret
def is_excluded(self, field):
    """Return True if the field matches one of the exclude_fields patterns.

    Patterns come from the [export] section ('exclude_fields' option) and are
    matched case-insensitively against the full field name. If the option was
    never loaded, nothing is excluded.
    """
    # getattr default protects against the attribute never having been set
    # (load_common_conf only assigns it when the option exists).
    patterns = getattr(self, 'exclude_fields', None) or ()
    return any(re.fullmatch(pattern, field, re.I) for pattern in patterns)
def plugins_to_export(self, stats):
"""Return the list of plugins to export.
@ -266,7 +294,7 @@ class GlancesExport:
if isinstance(stats, dict):
# Stats is a dict
# Is there a key ?
if "key" in stats.keys() and stats["key"] in stats.keys():
if "key" in stats and stats["key"] in stats:
pre_key = "{}.".format(stats[stats["key"]])
else:
pre_key = ""
@ -285,6 +313,8 @@ class GlancesExport:
export_values += item_values
else:
# We are on a simple value
if self.is_excluded(pre_key + key.lower()):
continue
export_names.append(pre_key + key.lower())
export_values.append(value)
elif isinstance(stats, list):

View File

@ -12,7 +12,7 @@
# How to test ?
#
# 1) docker run -d -e COUCHDB_USER=admin -e COUCHDB_PASSWORD=admin -p 5984:5984 --name my-couchdb couchdb
# 2) ./venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 2) .venv/bin/python -m glances -C ./conf/glances.conf --export couchdb --quiet
# 3) Result can be seen at: http://127.0.0.1:5984/_utils
#

View File

@ -0,0 +1,195 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""DuckDB interface class."""
import sys
import time
from datetime import datetime
from platform import node
import duckdb
from glances.exports.export import GlancesExport
from glances.logger import logger
# Define the type conversions for DuckDB
# https://duckdb.org/docs/stable/clients/python/conversion
convert_types = {
'bool': 'BOOLEAN',
'int': 'BIGINT',
'float': 'DOUBLE',
'str': 'VARCHAR',
'tuple': 'VARCHAR', # Store tuples as VARCHAR (comma-separated)
'list': 'VARCHAR', # Store lists as VARCHAR (comma-separated)
'NoneType': 'VARCHAR',
}
class Export(GlancesExport):
    """This class manages the DuckDB export module.

    Stats are written to a local DuckDB database: one table per plugin,
    one row per export iteration, tagged with the hostname (and the item
    key for list-based plugins).
    """

    def __init__(self, config=None, args=None):
        """Init the DuckDB export IF."""
        super().__init__(config=config, args=args)

        # Mandatory configuration keys (additional to host and port)
        # NOTE(review): self.db looks unused — the rest of this class reads
        # self.database (set by load_conf from the 'database' key); confirm.
        self.db = None

        # Optional configuration keys
        self.user = None
        self.password = None
        self.hostname = None

        # Load the configuration file
        self.export_enable = self.load_conf(
            'duckdb', mandatories=['database'], options=['user', 'password', 'hostname']
        )
        if not self.export_enable:
            exit('Missing DuckDB config')

        # The hostname is always added as an identifier in the DuckDB table
        # so we can filter the stats by hostname
        self.hostname = self.hostname or node().split(".")[0]

        # Init the DuckDB client
        self.client = self.init()

    def init(self):
        """Init the connection to the DuckDB server.

        Exits the process (code 2) if the database file cannot be opened.

        :returns: duckdb connection object, or None when export is disabled
        """
        if not self.export_enable:
            return None

        try:
            db = duckdb.connect(database=self.database)
        except Exception as e:
            logger.critical(f"Cannot connect to DuckDB {self.database} ({e})")
            sys.exit(2)
        else:
            logger.info(f"Stats will be exported to DuckDB: {self.database}")

        return db

    def normalize(self, value):
        # Nothing to do...
        # NOTE(review): bool('False') is True in Python (non-empty string),
        # so this branch returns True for both 'True' and 'False'; the intent
        # was probably `value[0] == 'True'` — confirm before relying on it.
        if isinstance(value, list) and len(value) == 1 and value[0] in ['True', 'False']:
            return bool(value[0])
        return value

    def update(self, stats):
        """Update the DuckDB export module.

        Builds, for every exported plugin, the table column definitions and
        the row values, then delegates the actual write to export().

        :returns: Boolean -- False when export is disabled, True otherwise
        """
        if not self.export_enable:
            return False

        # Get all the stats & limits
        # Current limitation with sensors and fs plugins because fields list is not the same
        self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
        all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
        all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())

        # Loop over plugins to export
        for plugin in self.last_exported_list():
            # Remove some fields
            if isinstance(all_stats[plugin], dict):
                # Merge the plugin limits into the stats row
                all_stats[plugin].update(all_limits[plugin])
                # Remove the <plugin>_disable field
                all_stats[plugin].pop(f"{plugin}_disable", None)
            elif isinstance(all_stats[plugin], list):
                for i in all_stats[plugin]:
                    i.update(all_limits[plugin])
                    # Remove the <plugin>_disable field
                    i.pop(f"{plugin}_disable", None)
            else:
                continue

            plugin_stats = all_stats[plugin]
            creation_list = []  # List used to create the DuckDB table
            values_list = []  # List of values to insert (list of lists, one list per row)
            if isinstance(plugin_stats, dict):
                # Single-row plugin: time + hostname + one column per field
                creation_list.append('time TIMETZ')
                creation_list.append('hostname_id VARCHAR')
                for key, value in plugin_stats.items():
                    creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
                # Create the list of values to insert
                item_list = []
                item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
                item_list.append(self.normalize(f"{self.hostname}"))
                item_list.extend([self.normalize(value) for value in plugin_stats.values()])
                values_list = [item_list]
            elif isinstance(plugin_stats, list) and len(plugin_stats) > 0 and 'key' in plugin_stats[0]:
                # Multi-row plugin: one row per item, keyed by the item's 'key' field.
                # Columns are derived from the FIRST item only.
                creation_list.append('time TIMETZ')
                creation_list.append('hostname_id VARCHAR')
                creation_list.append('key_id VARCHAR')
                for key, value in plugin_stats[0].items():
                    creation_list.append(f"{key} {convert_types[type(self.normalize(value)).__name__]}")
                # Create the list of values to insert
                for plugin_item in plugin_stats:
                    item_list = []
                    item_list.append(self.normalize(datetime.now().replace(microsecond=0)))
                    item_list.append(self.normalize(f"{self.hostname}"))
                    item_list.append(self.normalize(f"{plugin_item.get('key')}"))
                    item_list.extend([self.normalize(value) for value in plugin_item.values()])
                    values_list.append(item_list)
            else:
                continue

            # Export stats to DuckDB
            self.export(plugin, creation_list, values_list)

        return True

    def export(self, plugin, creation_list, values_list):
        """Export the stats to the DuckDB server.

        Creates the per-plugin table on first use, then inserts one row per
        entry of values_list using a parameterized INSERT.
        """
        logger.debug(f"Export {plugin} stats to DuckDB")

        # Create the table if it does not exist
        table_list = [t[0] for t in self.client.sql("SHOW TABLES").fetchall()]
        if plugin not in table_list:
            # Execute the create table query
            create_query = f"""
            CREATE TABLE {plugin} (
            {', '.join(creation_list)}
            );"""
            logger.debug(f"Create table: {create_query}")
            try:
                self.client.execute(create_query)
            except Exception as e:
                logger.error(f"Cannot create table {plugin}: {e}")
                return
            # Commit the changes
            self.client.commit()

        # Insert values into the table
        for values in values_list:
            insert_query = f"""
            INSERT INTO {plugin} VALUES (
            {', '.join(['?' for _ in values])}
            );"""
            logger.debug(f"Insert values into table {plugin}: {values}")
            try:
                self.client.execute(insert_query, values)
            except Exception as e:
                logger.error(f"Cannot insert data into table {plugin}: {e}")

        # Commit the changes
        self.client.commit()

    def exit(self):
        """Close the DuckDB export module."""
        # Force last write
        self.client.commit()
        # Close the DuckDB client
        time.sleep(3)  # Wait a bit to ensure all data is written
        self.client.close()
        # Call the father method
        super().exit()

View File

@ -43,6 +43,9 @@ class Export(GlancesExport):
# Perhaps a better method is possible...
self._metric_dict = {}
# Keys name (compute in update() method)
self.keys_name = {}
# Init the Prometheus Exporter
self.init()
@ -56,29 +59,41 @@ class Export(GlancesExport):
else:
logger.info(f"Start Prometheus exporter on {self.host}:{self.port}")
def update(self, stats):
    """Cache each plugin's key field name, then run the regular export cycle."""
    # keys_name maps plugin name -> its 'key' field; used later in export()
    # to turn "obj.stat" field names into Prometheus labels
    self.keys_name = {k: stats.get_plugin(k).get_key() for k in stats.getPluginsList()}
    super().update(stats)
def export(self, name, columns, points):
"""Write the points to the Prometheus exporter using Gauge."""
logger.debug(f"Export {name} stats to Prometheus exporter")
# Remove non number stats and convert all to float (for Boolean)
data = {k: float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
data = {str(k): float(v) for k, v in zip(columns, points) if isinstance(v, Number)}
# Write metrics to the Prometheus exporter
for k, v in data.items():
# Prometheus metric name: prefix_<glances stats name>
metric_name = self.prefix + self.METRIC_SEPARATOR + str(name) + self.METRIC_SEPARATOR + str(k)
for metric, value in data.items():
labels = self.labels
metric_name = self.prefix + self.METRIC_SEPARATOR + name + self.METRIC_SEPARATOR
try:
obj, stat = metric.split('.')
metric_name += stat
labels += f",{self.keys_name.get(name)}:{obj}"
except ValueError:
metric_name += metric
# Prometheus is very sensible to the metric name
# See: https://prometheus.io/docs/practices/naming/
for c in ' .-/:[]':
metric_name = metric_name.replace(c, self.METRIC_SEPARATOR)
# Get the labels
labels = self.parse_tags(self.labels)
labels = self.parse_tags(labels)
# Manage an internal dict between metric name and Gauge
if metric_name not in self._metric_dict:
self._metric_dict[metric_name] = Gauge(metric_name, k, labelnames=listkeys(labels))
self._metric_dict[metric_name] = Gauge(metric_name, "", labelnames=listkeys(labels))
# Write the value
if hasattr(self._metric_dict[metric_name], 'labels'):
# Add the labels (see issue #1255)
self._metric_dict[metric_name].labels(**labels).set(v)
self._metric_dict[metric_name].labels(**labels).set(value)
else:
self._metric_dict[metric_name].set(v)
self._metric_dict[metric_name].set(value)

View File

@ -83,6 +83,9 @@ class Export(GlancesExport):
if isinstance(value, bool):
return str(value).upper()
if isinstance(value, (list, tuple)):
# Special case for list of one boolean
if len(value) == 1 and isinstance(value[0], bool):
return str(value[0]).upper()
return ', '.join([f"'{v}'" for v in value])
if isinstance(value, str):
return f"'{value}'"
@ -95,8 +98,8 @@ class Export(GlancesExport):
return False
# Get all the stats & limits
# Current limitation with sensors and fs plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs']]
# @TODO: Current limitation with sensors, fs and diskio plugins because fields list is not the same
self._last_exported_list = [p for p in self.plugins_to_export(stats) if p not in ['sensors', 'fs', 'diskio']]
all_stats = stats.getAllExportsAsDict(plugin_list=self.last_exported_list())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.last_exported_list())
@ -159,6 +162,9 @@ class Export(GlancesExport):
continue
# Export stats to TimescaleDB
# logger.info(plugin)
# logger.info(f"Segmented by: {segmented_by}")
# logger.info(list(zip(creation_list, values_list[0])))
self.export(plugin, creation_list, segmented_by, values_list)
return True

View File

@ -17,6 +17,7 @@ import base64
import errno
import functools
import importlib
import multiprocessing
import os
import platform
import queue
@ -96,6 +97,11 @@ viewkeys = methodcaller('keys')
viewvalues = methodcaller('values')
viewitems = methodcaller('items')
# Multiprocessing start method (on POSIX system)
if LINUX or BSD or SUNOS or MACOS:
ctx_mp_fork = multiprocessing.get_context('fork')
else:
ctx_mp_fork = multiprocessing.get_context()
###################
# GLOBALS FUNCTIONS
@ -127,18 +133,6 @@ def listvalues(d):
return list(d.values())
def iteritems(d):
return iter(d.items())
def iterkeys(d):
return iter(d.keys())
def itervalues(d):
return iter(d.values())
def u(s, errors='replace'):
if isinstance(s, text_type):
return s
@ -375,6 +369,13 @@ def json_loads(data: Union[str, bytes, bytearray]) -> Union[dict, list]:
return json.loads(data)
def list_to_dict(data):
    """Convert a list of dicts (each carrying a 'key' entry) to a single dict.

    Each item must contain a 'key' entry naming the field whose value becomes
    the item's key in the returned dict. Items without a 'key' entry, or where
    the named field itself is missing, are skipped instead of raising KeyError.

    :returns: dict mapping item[item['key']] -> item, or None if data is not a list
    """
    if not isinstance(data, list):
        return None
    # Guard item[item['key']] too: a 'key' naming an absent field must not raise
    return {item[item['key']]: item for item in data if 'key' in item and item['key'] in item}
def dictlist(data, item):
if isinstance(data, dict):
try:
@ -409,6 +410,65 @@ def dictlist_first_key_value(data: list[dict], key, value) -> Optional[dict]:
return ret
def auto_unit(number, low_precision=False, min_symbol='K', none_symbol='-'):
    """Make a nice human-readable string out of number.

    Number of decimal places increases as quantity approaches 1.

    CASE: 613421788   RESULT: 585M  low_precision: 585M
    CASE: 5307033647  RESULT: 4.94G low_precision: 4.9G
    CASE: 44968414685 RESULT: 41.9G low_precision: 41.9G
    CASE: 838471403472 RESULT: 781G low_precision: 781G
    CASE: 9683209690677 RESULT: 8.81T low_precision: 8.8T
    CASE: 1073741824  RESULT: 1024M low_precision: 1024M
    CASE: 1181116006  RESULT: 1.10G low_precision: 1.1G

    :low_precision: returns fewer decimal places (default is False),
        sacrificing precision for more readability.
    :min_symbol: do not scale down below this unit (default is K)
    :none_symbol: string returned when number is None (default is '-')
    """
    if number is None:
        return none_symbol
    if number == 0:
        # Avoid 0.0
        return '0'

    # Units usable for this call, restricted by min_symbol
    all_units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    units = all_units
    if min_symbol in units:
        units = units[units.index(min_symbol):]

    # 1024-based scale factor for each unit symbol (K=1024^1 ... Y=1024^8)
    factor = {symbol: 1024 ** (index + 1) for index, symbol in enumerate(all_units)}

    # Floats get 2 decimals by default; ints get none (used by the fallback)
    precision = 2 if isinstance(number, float) else 0

    # Walk from the largest unit down and pick the first one where the
    # scaled value exceeds 1.
    for unit in reversed(units):
        scaled = float(number) / factor[unit]
        if scaled <= 1:
            continue
        # More decimals as the scaled value approaches 1
        if scaled < 10:
            precision = 2
        elif scaled < 100:
            precision = 1
        else:
            precision = 0
        if low_precision:
            # Readability mode: at most 1 decimal, none at all for K/M
            precision = 0 if unit in 'MK' else min(1, precision)
        elif unit == 'K':
            precision = 0
        return '{:.{p}f}{u}'.format(scaled, p=precision, u=unit)

    # Number is smaller than the smallest allowed unit: print it raw
    return f'{number:.{precision}f}'
def string_value_to_float(s):
"""Convert a string with a value and an unit to a float.
Example:
@ -531,3 +591,43 @@ def atoi(text):
def natural_keys(text):
"""Return a text in a natural/human readable format."""
return [atoi(c) for c in re.split(r'(\d+)', text)]
def exit_after(seconds, default=None):
    """Decorator factory: abort the wrapped call if it exceeds 'seconds'.

    The wrapped function runs in a forked child process (Linux only); if it
    does not finish within 'seconds', the child is terminated and 'default'
    is returned instead. On non-Linux platforms the function is returned
    unmodified (no timeout applied).
    """

    def handler(q, func, args, kwargs):
        # Runs in the child process: execute and ship the result back
        q.put(func(*args, **kwargs))

    def decorator(func):
        if not LINUX:
            # The timeout mechanism relies on fork(): Linux only
            return func

        def wraps(*args, **kwargs):
            try:
                q = ctx_mp_fork.Queue()
            except PermissionError:
                # Manage an exception in Snap packages on Linux
                # The strict mode prevents the use of multiprocessing.Queue()
                # There is a "dirty" hack:
                # https://forum.snapcraft.io/t/python-multiprocessing-permission-denied-in-strictly-confined-snap/15518/2
                # But I prefer to just disable the timeout feature in this case
                # BUGFIX: return the result (was previously discarded, making
                # every wrapped call return None in strictly confined Snaps)
                return func(*args, **kwargs)
            else:
                p = ctx_mp_fork.Process(target=handler, args=(q, func, args, kwargs))
                p.start()
                p.join(timeout=seconds)
                if not p.is_alive():
                    return q.get()
                p.terminate()
                p.join(timeout=0.1)
                if p.is_alive():
                    # Kill in case the process doesn't terminate
                    # Happens with cases like broken NFS connections
                    p.kill()
                return default

        return wraps

    return decorator

View File

@ -379,7 +379,7 @@ Examples of use:
default=None,
type=str,
dest='export_process_filter',
help='set the export process filter (comman separated list of regular expression)',
help='set the export process filter (comma-separated list of regular expression)',
)
# Client/Server option
parser.add_argument(
@ -550,7 +550,18 @@ Examples of use:
help='test memory leak (python 3.4 or higher needed)',
)
parser.add_argument(
'--api-doc', default=None, action='store_true', dest='stdout_apidoc', help='display fields descriptions'
'--api-doc',
default=None,
action='store_true',
dest='stdout_api_doc',
help='display Python API documentation',
)
parser.add_argument(
'--api-restful-doc',
default=None,
action='store_true',
dest='stdout_api_restful_doc',
help='display Restful API documentation',
)
if not WINDOWS:
parser.add_argument(
@ -582,6 +593,13 @@ Examples of use:
dest='diskio_iops',
help='show IO per second in the DiskIO plugin',
)
parser.add_argument(
'--diskio-latency',
action='store_true',
default=False,
dest='diskio_latency',
help='show IO latency in the DiskIO plugin',
)
parser.add_argument(
'--fahrenheit',
action='store_true',
@ -630,6 +648,22 @@ Examples of use:
default='',
help='strftime format string for displaying current date in standalone mode',
)
# Fetch
parser.add_argument(
'--fetch',
'--stdout-fetch',
action='store_true',
default=False,
dest='stdout_fetch',
help='display a (neo)fetch like summary and exit',
)
parser.add_argument(
'--fetch-template',
'--stdout-fetch-template',
dest='fetch_template',
default='',
help='overwrite default fetch template file',
)
return parser
@ -689,7 +723,10 @@ Examples of use:
args.network_cumul = False
# Processlist is updated in processcount
if getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
if getattr(args, 'disable_processcount', False):
logger.warning('Processcount is disable, so processlist (updated by processcount) is also disable')
disable(args, 'processlist')
elif getattr(args, 'enable_processlist', False) or getattr(args, 'enable_programlist', False):
enable(args, 'processcount')
# Set a default export_process_filter (with all process) when using the stdout mode
@ -787,6 +824,10 @@ Examples of use:
disable(args, 'memswap')
disable(args, 'load')
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Memory leak
if getattr(args, 'memory_leak', False):
logger.info('Memory leak detection enabled')
@ -796,9 +837,14 @@ Examples of use:
args.time = 1
args.disable_history = True
# Unicode => No separator
if args.disable_unicode:
args.enable_separator = False
# Disable history if history_size is 0
if self.config.has_section('global'):
if self.config.get_int_value('global', 'history_size', default=1200) == 0:
args.disable_history = True
# Display an information message if history is disabled
if args.disable_history:
logger.info("Stats history is disabled")
def parse_args(self, args_begin_at):
"""Parse command line arguments.

View File

@ -13,11 +13,10 @@ import os
import pickle
import threading
from datetime import datetime, timedelta
from ssl import CertificateError
from glances import __version__
from glances.config import user_cache_dir
from glances.globals import HTTPError, URLError, nativestr, safe_makedirs, urlopen
from glances.globals import nativestr, safe_makedirs, urlopen
from glances.logger import logger
try:
@ -155,7 +154,7 @@ class Outdated:
try:
res = urlopen(PYPI_API_URL, timeout=3).read()
except (HTTPError, URLError, CertificateError) as e:
except Exception as e:
logger.debug(f"Cannot get Glances version from the PyPI RESTful API ({e})")
else:
self.data['latest_version'] = json.loads(nativestr(res))['info']['version']

View File

@ -29,8 +29,8 @@ class Bar:
size,
bar_char='|',
empty_char=' ',
pre_char='[',
post_char=']',
pre_char='',
post_char='',
unit_char='%',
display_value=True,
min_value=0,
@ -42,8 +42,8 @@ class Bar:
size (_type_): Bar size
bar_char (str, optional): Bar character. Defaults to '|'.
empty_char (str, optional): Empty character. Defaults to ' '.
pre_char (str, optional): Display this char before the bar. Defaults to '['.
post_char (str, optional): Display this char after the bar. Defaults to ']'.
pre_char (str, optional): Display this char before the bar. Defaults to ''.
post_char (str, optional): Display this char after the bar. Defaults to ''.
unit_char (str, optional): Unit char to be displayed. Defaults to '%'.
display_value (bool, optional): Do i need to display the value. Defaults to True.
min_value (int, optional): Minimum value. Defaults to 0.
@ -68,12 +68,10 @@ class Bar:
@property
def size(self, with_decoration=False):
# Return the bar size, with or without decoration
if with_decoration:
return self.__size
# Return the bar size
if self.__display_value:
return self.__size - 6
return None
return self.__size
@property
def percent(self):
@ -105,6 +103,9 @@ class Bar:
whole += 1
ret += self.__empty_char * int(self.size - whole)
# Add the post and pre chars
ret = f'{self.__pre_char}{ret}{self.__post_char}'
# Add the value
if self.__display_value:
if self.percent >= self.max_value:

View File

@ -51,7 +51,7 @@ class _GlancesCurses:
'a': {'sort_key': 'auto'},
'A': {'switch': 'disable_amps'},
'b': {'switch': 'byte'},
'B': {'switch': 'diskio_iops'},
'B': {'handler': '_handle_diskio_iops'},
'c': {'sort_key': 'cpu_percent'},
'C': {'switch': 'disable_cloud'},
'd': {'switch': 'disable_diskio'},
@ -69,6 +69,7 @@ class _GlancesCurses:
# 'k' > Kill selected process
'K': {'switch': 'disable_connections'},
'l': {'switch': 'disable_alert'},
'L': {'handler': '_handle_diskio_latency'},
'm': {'sort_key': 'memory_percent'},
'M': {'switch': 'reset_minmax_tag'},
'n': {'switch': 'disable_network'},
@ -92,8 +93,10 @@ class _GlancesCurses:
'z': {'handler': '_handle_disable_process'},
'+': {'handler': '_handle_increase_nice'},
'-': {'handler': '_handle_decrease_nice'},
# "<" (left arrow) navigation through process sort
# ">" (right arrow) navigation through process sort
# "<" (shift + left arrow) navigation through process sort
# ">" (shift + right arrow) navigation through process sort
# "<" (left arrow) scroll through process name
# ">" (right arrow) scroll through process name
# 'UP' > Up in the server list
# 'DOWN' > Down in the server list
}
@ -107,7 +110,7 @@ class _GlancesCurses:
# Define left sidebar
# This variable is used in the make webui task in order to generate the
# glances/outputs/static/js/uiconfig.json file for the web interface
# This lidt can also be overwritten by the configuration file ([outputs] left_menu option)
# This list can also be overwritten by the configuration file ([outputs] left_menu option)
_left_sidebar = [
'network',
'ports',
@ -185,6 +188,8 @@ class _GlancesCurses:
# Init Glances cursor
self.args.cursor_position = 0
self.args.cursor_process_name_position = 0
# For the moment cursor only available in standalone mode
self.args.disable_cursor = not self.args.is_standalone
@ -193,6 +198,9 @@ class _GlancesCurses:
self.term_window.nodelay(1)
self.pressedkey = -1
# Is this the end ?
self.is_end = False
# History tag
self._init_history()
@ -247,7 +255,6 @@ class _GlancesCurses:
pass
def get_key(self, window):
# TODO: Check issue #163
return window.getch()
def catch_actions_from_hotkey(self, hotkey):
@ -263,8 +270,10 @@ class _GlancesCurses:
{
self.pressedkey in {ord('e')} and not self.args.programs: self._handle_process_extended,
self.pressedkey in {ord('k')} and not self.args.disable_cursor: self._handle_kill_process,
self.pressedkey in {curses.KEY_LEFT}: self._handle_sort_left,
self.pressedkey in {curses.KEY_RIGHT}: self._handle_sort_right,
self.pressedkey in {curses.KEY_SLEFT}: self._handle_sort_left,
self.pressedkey in {curses.KEY_SRIGHT}: self._handle_sort_right,
self.pressedkey in {curses.KEY_LEFT}: self._handle_process_name_left,
self.pressedkey in {curses.KEY_RIGHT}: self._handle_process_name_right,
self.pressedkey in {curses.KEY_UP, 65} and not self.args.disable_cursor: self._handle_cursor_up,
self.pressedkey in {curses.KEY_DOWN, 66} and not self.args.disable_cursor: self._handle_cursor_down,
self.pressedkey in {curses.KEY_F5, 18}: self._handle_refresh,
@ -350,6 +359,13 @@ class _GlancesCurses:
def _handle_kill_process(self):
self.kill_process = not self.kill_process
def _handle_process_name_left(self):
if self.args.cursor_process_name_position > 0:
self.args.cursor_process_name_position -= 1
def _handle_process_name_right(self):
self.args.cursor_process_name_position += 1
def _handle_clean_logs(self):
glances_events.clean()
@ -363,6 +379,18 @@ class _GlancesCurses:
else:
glances_processes.enable()
def _handle_diskio_iops(self):
"""Switch between bytes/s and IOPS for Disk IO."""
self.args.diskio_iops = not self.args.diskio_iops
if self.args.diskio_iops:
self.args.diskio_latency = False
def _handle_diskio_latency(self):
"""Switch between bytes/s and latency for Disk IO."""
self.args.diskio_latency = not self.args.diskio_latency
if self.args.diskio_latency:
self.args.diskio_iops = False
def _handle_sort_left(self):
next_sort = (self.loop_position() - 1) % len(self._sort_loop)
glances_processes.set_sort_key(self._sort_loop[next_sort], False)
@ -384,6 +412,10 @@ class _GlancesCurses:
logger.info("Stop Glances client and return to the browser")
else:
logger.info(f"Stop Glances (keypressed: {self.pressedkey})")
# End the curses window
self.end()
# Exit the program
sys.exit(0)
def _handle_refresh(self):
glances_processes.reset_internal_cache()
@ -430,6 +462,7 @@ class _GlancesCurses:
curses.endwin()
except Exception:
pass
self.is_end = True
def init_line_column(self):
"""Init the line and column position for the curses interface."""
@ -1128,6 +1161,11 @@ class _GlancesCurses:
while not countdown.finished() and not isexitkey:
# Getkey
pressedkey = self.__catch_key(return_to_browser=return_to_browser)
if pressedkey == -1:
self.wait()
continue
isexitkey = pressedkey == ord('\x1b') or pressedkey == ord('q')
if pressedkey == curses.KEY_F5 or self.pressedkey == 18:
@ -1135,7 +1173,7 @@ class _GlancesCurses:
self.clear()
return isexitkey
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66):
if pressedkey in (curses.KEY_UP, 65, curses.KEY_DOWN, 66, curses.KEY_LEFT, 68, curses.KEY_RIGHT, 67):
# Up, down, left or right key pressed: reset the countdown
# Better for user experience
countdown.reset()

View File

@ -10,6 +10,7 @@
import curses
import math
import sys
from glances.logger import logger
from glances.outputs.glances_curses import _GlancesCurses
@ -49,7 +50,6 @@ class GlancesCursesBrowser(_GlancesCurses):
self._page_max = 0
self._page_max_lines = 0
self.is_end = False
self._revesed_sorting = False
self._stats_list = None
@ -87,7 +87,7 @@ class GlancesCursesBrowser(_GlancesCurses):
counts[color] = counts.get(color, 0) + 1
result = ''
for key in counts.keys():
for key in counts:
result += key + ': ' + str(counts[key]) + ' '
return result
@ -157,8 +157,7 @@ class GlancesCursesBrowser(_GlancesCurses):
# 'ESC'|'q' > Quit
self.end()
logger.info("Stop Glances client browser")
# sys.exit(0)
self.is_end = True
sys.exit(0)
elif self.pressedkey == 10:
# 'ENTER' > Run Glances on the selected server
self.active_server = self._current_page * self._page_max_lines + self.cursor_position
@ -327,10 +326,15 @@ class GlancesCursesBrowser(_GlancesCurses):
y += 1
# Second line (for item/key)
for k, v in column_def.items():
if xc >= screen_x or y >= screen_y or v is None:
continue
k_split = k.split('_')
if xc < screen_x and y < screen_y and v is not None:
self.term_window.addnstr(y, xc, ' '.join(k_split[1:]).upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
if len(k_split) == 1:
header_str = k_split[0]
else:
header_str = ' '.join(k_split[1:])
self.term_window.addnstr(y, xc, header_str.upper(), screen_x - x, self.colors_list['BOLD'])
xc += v + self.space_between_column
y += 1
# If a servers has been deleted from the list...

View File

@ -6,7 +6,7 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""RestFull API interface class."""
"""RestFul API interface class."""
import os
import socket
@ -21,6 +21,7 @@ from glances.events_list import glances_events
from glances.globals import json_dumps
from glances.logger import logger
from glances.password import GlancesPassword
from glances.plugins.plugin.dag import get_plugin_dependencies
from glances.processes import glances_processes
from glances.servers_list import GlancesServersList
from glances.servers_list_dynamic import GlancesAutoDiscoverClient
@ -183,10 +184,12 @@ class GlancesRestfulApi:
self.url_prefix = self.url_prefix.rstrip('/')
logger.debug(f'URL prefix: {self.url_prefix}')
def __update_stats(self):
def __update_stats(self, plugins_list_to_update=None):
# Never update more than 1 time per cached_time
if self.timer.finished():
self.stats.update()
# Also update if specific plugins are requested
# In this case, lru_cache will handle the stat's update frequency
if self.timer.finished() or plugins_list_to_update:
self.stats.update(plugins_list_to_update=plugins_list_to_update)
self.timer = Timer(self.args.cached_time)
def __update_servers_list(self):
@ -436,7 +439,8 @@ class GlancesRestfulApi:
HTTP/1.1 404 Not Found
"""
# Update the stat
self.__update_stats()
# TODO: Why ??? Try to comment it
# self.__update_stats()
try:
plist = self.plugins_list
@ -528,7 +532,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -559,7 +563,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -585,7 +589,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat ID
@ -645,7 +649,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -670,7 +674,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -695,7 +699,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -719,7 +723,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat views
@ -744,7 +748,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value of the stat history
@ -803,7 +807,7 @@ class GlancesRestfulApi:
self._check_if_plugin_available(plugin)
# Update the stat
self.__update_stats()
self.__update_stats(get_plugin_dependencies(plugin))
try:
# Get the RAW value

View File

@ -0,0 +1,286 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Generate Glances Python API documentation."""
from pprint import pformat
from glances import api
APIDOC_HEADER = """\
.. _api:
Python API documentation
========================
This documentation describes the Glances Python API.
Note: This API is only available in Glances 4.4.0 or higher.
"""
def printtab(s, indent=' '):
print(indent + s.replace('\n', '\n' + indent))
def print_tldr(gl):
"""Print the TL;DR section of the API documentation."""
sub_title = 'TL;DR'
print(sub_title)
print('-' * len(sub_title))
print('')
print('You can access the Glances API by importing the `glances.api` module and creating an')
print('instance of the `GlancesAPI` class. This instance provides access to all Glances plugins')
print('and their fields. For example, to access the CPU plugin and its total field, you can')
print('use the following code:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
printtab('>>> gl.cpu')
printtab(f'{pformat(gl.cpu.stats)}')
printtab('>>> gl.cpu["total"]')
printtab(f'{gl.cpu["total"]}')
printtab('>>> gl.mem["used"]')
printtab(f'{gl.mem["used"]}')
printtab('>>> gl.auto_unit(gl.mem["used"])')
printtab(f'{gl.auto_unit(gl.mem["used"])}')
print('')
print('If the stats return a list of items (like network interfaces or processes), you can')
print('access them by their name:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.network.keys()')
printtab(f'{gl.network.keys()}')
printtab(f'>>> gl.network["{gl.network.keys()[0]}"]')
printtab(f'{pformat(gl.network[gl.network.keys()[0]])}')
print('')
def print_init_api(gl):
sub_title = 'Init Glances Python API'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Init the Glances API:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> from glances import api')
printtab('>>> gl = api.GlancesAPI()')
print('')
def print_plugins_list(gl):
sub_title = 'Get Glances plugins list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Get the plugins list:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.plugins()')
printtab(f'{gl.plugins()}')
print('')
def print_plugin(gl, plugin):
"""Print the details of a single plugin."""
sub_title = f'Glances {plugin}'
print(sub_title)
print('-' * len(sub_title))
print('')
stats_obj = gl.__getattr__(plugin)
print(f'{plugin.capitalize()} stats:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> type(gl.{plugin})')
printtab(f'{type(stats_obj)}')
if len(stats_obj.keys()) > 0 and isinstance(stats_obj[stats_obj.keys()[0]], dict):
printtab(f'>>> gl.{plugin}')
printtab(f'Return a dict of dict with key=<{stats_obj[stats_obj.keys()[0]]["key"]}>')
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}["{stats_obj.keys()[0]}"]')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
else:
printtab(f'>>> gl.{plugin}')
printtab(f'{pformat(stats_obj.stats)}')
if len(stats_obj.keys()) > 0:
printtab(f'>>> gl.{plugin}.keys()')
printtab(f'{stats_obj.keys()}')
printtab(f'>>> gl.{plugin}["{stats_obj.keys()[0]}"]')
printtab(f'{pformat(stats_obj[stats_obj.keys()[0]])}')
print('')
if stats_obj.fields_description is not None:
print(f'{plugin.capitalize()} fields description:')
print('')
for field, description in stats_obj.fields_description.items():
print(f'* {field}: {description["description"]}')
print('')
print(f'{plugin.capitalize()} limits:')
print('')
print('.. code-block:: python')
print('')
printtab(f'>>> gl.{plugin}.limits')
printtab(f'{pformat(gl.__getattr__(plugin).limits)}')
print('')
def print_plugins(gl):
"""Print the details of all plugins."""
for plugin in [p for p in gl.plugins() if p not in ['help', 'programlist']]:
print_plugin(gl, plugin)
def print_auto_unit(gl):
sub_title = 'Use auto_unit to display a human-readable string with the unit'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use auto_unit() function to generate a human-readable string with the unit:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.mem["used"]')
printtab(f'{gl.mem["used"]}')
print('')
printtab('>>> gl.auto_unit(gl.mem["used"])')
printtab(f'{gl.auto_unit(gl.mem["used"])}')
print('')
print("""
Args:
number (float or int): The numeric value to be converted.
low_precision (bool, optional): If True, use lower precision for the output. Defaults to False.
min_symbol (str, optional): The minimum unit symbol to use (e.g., 'K' for kilo). Defaults to 'K'.
none_symbol (str, optional): The symbol to display if the number is None. Defaults to '-'.
Returns:
str: A human-readable string representation of the number with units.
""")
def print_bar(gl):
sub_title = 'Use to display stat as a bar'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use bar() function to generate a bar:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.bar(gl.mem["percent"])')
printtab(f'{gl.bar(gl.mem["percent"])}')
print('')
print("""
Args:
value (float): The percentage value to represent in the bar (typically between 0 and 100).
size (int, optional): The total length of the bar in characters. Defaults to 18.
bar_char (str, optional): The character used to represent the filled portion of the bar. Defaults to ''.
empty_char (str, optional): The character used to represent the empty portion of the bar. Defaults to ''.
pre_char (str, optional): A string to prepend to the bar. Defaults to ''.
post_char (str, optional): A string to append to the bar. Defaults to ''.
Returns:
str: A string representing the progress bar.
""")
def print_top_process(gl):
sub_title = 'Use to display top process list'
print(sub_title)
print('-' * len(sub_title))
print('')
print('Use top_process() function to generate a list of top processes sorted by CPU or MEM usage:')
print('')
print('.. code-block:: python')
print('')
printtab('>>> gl.top_process()')
printtab(f'{gl.top_process()}')
print('')
print("""
Args:
limit (int, optional): The maximum number of top processes to return. Defaults to 3.
sorted_by (str, optional): The primary key to sort processes by (e.g., 'cpu_percent').
Defaults to 'cpu_percent'.
sorted_by_secondary (str, optional): The secondary key to sort processes by if primary keys are equal
(e.g., 'memory_percent'). Defaults to 'memory_percent'.
Returns:
list: A list of dictionaries representing the top processes, excluding those with 'glances' in their
command line.
Note:
The 'glances' process is excluded from the returned list to avoid self-generated CPU load affecting
the results.
""")
class GlancesStdoutApiDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):
# Init
self.gl = api.GlancesAPI()
def end(self):
pass
def update(self, stats, duration=1):
"""Display issue"""
# Display header
print(APIDOC_HEADER)
# Display TL;DR section
print_tldr(self.gl)
# Init the API
print_init_api(self.gl)
# Display plugins list
print_plugins_list(self.gl)
# Loop over plugins
print_plugins(self.gl)
# Others helpers
print_auto_unit(self.gl)
print_bar(self.gl)
print_top_process(self.gl)
# Return True to exit directly (no refresh)
return True

View File

@ -6,33 +6,32 @@
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Fields description interface class."""
"""Generate Glances Restful API documentation."""
import json
import time
from pprint import pformat
from glances import __apiversion__
from glances.globals import iteritems
from glances.logger import logger
API_URL = f"http://localhost:61208/api/{__apiversion__}"
APIDOC_HEADER = f"""\
.. _api:
.. _api_restful:
API (Restfull/JSON) documentation
=================================
Restful/JSON API documentation
==============================
This documentation describes the Glances API version {__apiversion__} (Restfull/JSON) interface.
This documentation describes the Glances API version {__apiversion__} (Restful/JSON) interface.
An OpenAPI specification file is available at:
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/openapi.json``
``https://raw.githubusercontent.com/nicolargo/glances/refs/heads/develop/docs/api/openapi.json``
Run the Glances API server
--------------------------
The Glances Restfull/API server could be ran using the following command line:
The Glances Restful/API server could be ran using the following command line:
.. code-block:: bash
@ -136,7 +135,7 @@ def print_plugin_description(plugin, stat):
print('Fields descriptions:')
print('')
time_since_update = False
for field, description in iteritems(stat.fields_description):
for field, description in stat.fields_description.items():
print(
'* **{}**: {} (unit is *{}*)'.format(
field,
@ -354,7 +353,7 @@ def print_plugin_post_events():
print('')
class GlancesStdoutApiDoc:
class GlancesStdoutApiRestfulDoc:
"""This class manages the fields description display."""
def __init__(self, config=None, args=None):

View File

@ -55,12 +55,12 @@ class GlancesStdoutCsv:
line += f'{plugin}.{attribute}{self.separator}'
else:
if isinstance(stat, dict):
for k in stat.keys():
for k in stat:
line += f'{plugin}.{str(k)}{self.separator}'
elif isinstance(stat, list):
for i in stat:
if isinstance(i, dict) and 'key' in i:
for k in i.keys():
for k in i:
line += '{}.{}.{}{}'.format(plugin, str(i[i['key']]), str(k), self.separator)
else:
line += f'{plugin}{self.separator}'

View File

@ -0,0 +1,87 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
"""Fetch mode interface class."""
import jinja2
from glances import api
from glances.logger import logger
DEFAULT_FETCH_TEMPLATE = """
{{ gl.system['hostname'] }}{{ ' | ' + gl.ip['address'] if gl.ip['address'] else '' }} | Uptime: {{ gl.uptime }}
{{ gl.system['hr_name'] }}
💡 LOAD {{ '%0.2f'| format(gl.load['min1']) }}/min1 |\
{{ '%0.2f'| format(gl.load['min5']) }}/min5 |\
{{ '%0.2f'| format(gl.load['min15']) }}/min15
CPU {{ gl.bar(gl.cpu['total']) }} {{ gl.cpu['total'] }}% of {{ gl.core['log'] }} cores
🧠 MEM {{ gl.bar(gl.mem['percent']) }} {{ gl.mem['percent'] }}% ({{ gl.auto_unit(gl.mem['used']) }} /\
{{ gl.auto_unit(gl.mem['total']) }})
{% for fs in gl.fs.keys() %}\
💾 {% if loop.index == 1 %}DISK{% else %} {% endif %}\
{{ gl.bar(gl.fs[fs]['percent']) }} {{ gl.fs[fs]['percent'] }}% ({{ gl.auto_unit(gl.fs[fs]['used']) }} /\
{{ gl.auto_unit(gl.fs[fs]['size']) }}) for {{ fs }}
{% endfor %}\
{% for net in gl.network.keys() %}\
📡 {% if loop.index == 1 %}NET{% else %} {% endif %}\
{{ gl.auto_unit(gl.network[net]['bytes_recv_rate_per_sec']) }}b/s\
{{ gl.auto_unit(gl.network[net]['bytes_sent_rate_per_sec']) }}b/s for {{ net }}
{% endfor %}\
🔥 TOP PROCESS by CPU
{% for process in gl.top_process() %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
{{ process['cpu_percent'] }}% CPU\
{{ ' ' * (8 - (gl.auto_unit(process['cpu_percent']) | length)) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM
{% endfor %}\
🔥 TOP PROCESS by MEM
{% for process in gl.top_process(sorted_by='memory_percent', sorted_by_secondary='cpu_percent') %}\
{{ loop.index }} {{ process['name'][:20] }}{{ ' ' * (20 - process['name'][:20] | length) }}\
🧠 {{ gl.auto_unit(process['memory_info']['rss']) }}B MEM\
{{ ' ' * (7 - (gl.auto_unit(process['memory_info']['rss']) | length)) }}\
{{ process['cpu_percent'] }}% CPU
{% endfor %}\
"""
class GlancesStdoutFetch:
"""This class manages the Stdout JSON display."""
def __init__(self, config=None, args=None):
# Init
self.config = config
self.args = args
self.gl = api.GlancesAPI(self.config, self.args)
def end(self):
pass
def update(self, stats, duration=3, cs_status=None, return_to_browser=False):
"""Display fetch from the template file to stdout."""
if self.args.fetch_template == "":
fetch_template = DEFAULT_FETCH_TEMPLATE
else:
logger.info("Using fetch template file: " + self.args.fetch_template)
# Load the template from the file given in the self.args.fetch_template argument
with open(self.args.fetch_template) as f:
fetch_template = f.read()
# Create a Jinja2 environment
jinja_env = jinja2.Environment(loader=jinja2.BaseLoader())
template = jinja_env.from_string(fetch_template)
output = template.render(gl=self.gl)
print(output)
# Return True to exit directly (no refresh)
return True

View File

@ -98,7 +98,7 @@ class GlancesStdoutIssue:
stat = stats.get_plugin(plugin).get_export()
# Hide private information
if plugin == 'ip':
for key in stat.keys():
for key in stat:
stat[key] = '***'
except Exception as e:
stat_error = e

View File

@ -17,6 +17,7 @@ _unicode_message = {
'PROCESS_SELECTOR': ['>', '>'],
'MEDIUM_LINE': ['\u2500', ''],
'LOW_LINE': ['\u2581', '_'],
'THREE_DOTS': ['\u2026', '...'],
}

View File

@ -33,6 +33,7 @@ hash -r
You must run the following command from the `glances/outputs/static/` directory.
```bash
.venv/bin/python ./generate_webui_conf.py > ./glances/outputs/static/js/uiconfig.json
cd glances/outputs/static/
```

View File

@ -187,6 +187,11 @@ export default {
const title = (data.stats && data.stats.system && data.stats.system.hostname) || '';
return title ? `${title} - Glances` : 'Glances';
},
topMenu() {
return this.config.outputs !== undefined && this.config.outputs.top_menu !== undefined
? this.config.outputs.top_menu.split(',')
: uiconfig.topMenu;
},
leftMenu() {
return this.config.outputs !== undefined && this.config.outputs.left_menu !== undefined
? this.config.outputs.left_menu.split(',')
@ -316,6 +321,17 @@ export default {
// 'B' => Switch between bit/s and IO/s for Disk IO
hotkeys('shift+B', () => {
this.store.args.diskio_iops = !this.store.args.diskio_iops;
if (this.store.args.diskio_iops) {
this.store.args.diskio_latency = false;
}
});
// 'L' => Switch to latency for Disk IO
hotkeys('shift+L', () => {
this.store.args.diskio_latency = !this.store.args.diskio_latency;
if (this.store.args.diskio_latency) {
this.store.args.diskio_iops = false;
}
});
// l => Show/hide alert logs

View File

@ -59,6 +59,9 @@
: $filters.bytes(container.limit)
}}
</td>
<td v-show="!getDisableStats().includes('ports')" scope="row" class="text-truncate">
{{ container.ports }}
</td>
<td v-show="!getDisableStats().includes('command')" scope="row" class="text-truncate">
{{ container.command }}
</td>
@ -94,6 +97,7 @@
<td v-show="!getDisableStats().includes('diskio')" scope="col">IOWps</td>
<td v-show="!getDisableStats().includes('networkio')" scope="col">RXps</td>
<td v-show="!getDisableStats().includes('networkio')" scope="col">TXps</td>
<td v-show="!getDisableStats().includes('ports')" scope="col">Ports</td>
<td v-show="!getDisableStats().includes('command')" scope="col">Command</td>
</tr>
</thead>
@ -159,6 +163,9 @@
: $filters.bits(container.network_tx)
}}
</td>
<td v-show="!getDisableStats().includes('ports')" scope="row">
{{ container.ports }}
</td>
<td v-show="!getDisableStats().includes('command')" scope="row" class="text-truncate">
{{ container.command }}
</td>
@ -228,6 +235,7 @@ export default {
io_wx: containerData.io_wx,
network_rx: containerData.network_rx,
network_tx: containerData.network_tx,
ports: containerData.ports,
command: containerData.command,
image: containerData.image,
engine: containerData.engine,

View File

@ -4,8 +4,10 @@
<thead>
<tr>
<th scope="col">DISK I/O</th>
<th v-show="!args.diskio_iops" scope="col" class="text-end w-25">Rps</th>
<th v-show="!args.diskio_iops" scope="col" class="text-end w-25">Wps</th>
<th v-show="!args.diskio_iops && !args.diskio_latency" scope="col" class="text-end w-25">Rps</th>
<th v-show="!args.diskio_iops && !args.diskio_latency" scope="col" class="text-end w-25">Wps</th>
<th v-show="args.diskio_latency" scope="col" class="text-end w-25">ms/opR</th>
<th v-show="args.diskio_latency" scope="col" class="text-end w-25">ms/opW</th>
<th v-show="args.diskio_iops" scope="col" class="text-end w-25">IORps</th>
<th v-show="args.diskio_iops" scope="col" class="text-end w-25">IOWps</th>
</tr>
@ -15,16 +17,22 @@
<td scope="row" class="text-truncate">
{{ $filters.minSize(disk.alias ? disk.alias : disk.name, 16) }}
</td>
<td
v-show="!args.diskio_iops" class="text-end w-25"
<td v-show="!args.diskio_iops && !args.diskio_latency" class="text-end w-25"
:class="getDecoration(disk.name, 'write_bytes_rate_per_sec')">
{{ disk.bitrate.txps }}
</td>
<td
v-show="!args.diskio_iops" class="text-end w-25"
<td v-show="!args.diskio_iops && !args.diskio_latency" class="text-end w-25"
:class="getDecoration(disk.name, 'read_bytes_rate_per_sec')">
{{ disk.bitrate.rxps }}
</td>
<td v-show="args.diskio_latency" class="text-end w-25"
:class="getDecoration(disk.name, 'write_latency')">
{{ disk.latency.txps }}
</td>
<td v-show="args.diskio_latency" class="text-end w-25"
:class="getDecoration(disk.name, 'read_latency')">
{{ disk.latency.rxps }}
</td>
<td v-show="args.diskio_iops" class="text-end w-25">
{{ disk.count.txps }}
</td>
@ -75,6 +83,10 @@ export default {
count: {
txps: bytes(diskioData['read_count_rate_per_sec']),
rxps: bytes(diskioData['write_count_rate_per_sec'])
},
latency: {
txps: bytes(diskioData['read_latency']),
rxps: bytes(diskioData['write_latency'])
}
};
}).filter(disk => {
@ -91,7 +103,11 @@ export default {
methods: {
getDecoration(diskName, field) {
if (this.view[diskName][field] == undefined) {
return;
if (this.view[field] == undefined) {
return;
} else {
return this.view[field].decoration.toLowerCase();
}
}
return this.view[diskName][field].decoration.toLowerCase();
}

View File

@ -17,11 +17,16 @@
<td scope="row">total:</td>
<td class="text-end"><span>{{ $filters.bytes(total) }}</span></td>
</tr>
<tr>
<tr v-if="!available_args">
<td scope="row">used:</td>
<td class="text-end" :class="getDecoration('used')"><span>{{
$filters.bytes(used, 2) }}</span></td>
</tr>
<tr v-if="available_args">
<td scope="row">avail:</td>
<td class="text-end" :class="getDecoration('available')"><span>{{
$filters.bytes(available, 2) }}</span></td>
</tr>
<tr>
<td scope="row">free:</td>
<td class="text-end" :class="getDecoration('free')"><span>{{
@ -78,13 +83,26 @@
</template>
<script>
import { store } from '../store.js';
export default {
props: {
data: {
type: Object
}
},
data() {
return {
store
};
},
computed: {
config() {
return this.store.config || {};
},
available_args() {
return this.config.mem.available || false;
},
stats() {
return this.data.stats['mem'];
},
@ -92,7 +110,7 @@ export default {
return this.data.views['mem'];
},
percent() {
return this.stats['percent'];
return this.stats['percent'].toFixed(1);
},
total() {
return this.stats['total'];
@ -100,6 +118,9 @@ export default {
used() {
return this.stats['used'];
},
available() {
return this.stats['available'];
},
free() {
return this.stats['free'];
},

View File

@ -11,7 +11,7 @@
<div>
<span>CPU Min/Max/Mean: </span>
<span class="careful">{{ $filters.number(extended_stats.cpu_min, 1)
}}% / {{
}}% / {{
$filters.number(extended_stats.cpu_max, 1) }}% / {{ $filters.number(extended_stats.cpu_mean, 1)
}}%</span>
<span>Affinity: </span>
@ -101,7 +101,7 @@
@click="$emit('update:sorter', 'memory_percent')">
MEM%
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="col">
<td v-show="!getDisableStats().includes('memory_info') && !getDisableVms()" scope="col">
VIRT
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="col">
@ -158,7 +158,7 @@
:class="getMemoryPercentAlert(process)">
{{ process.memory_percent == -1 ? '?' : $filters.number(process.memory_percent, 1) }}
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="row">
<td v-show="!getDisableStats().includes('memory_info') && !getDisableVms()" scope="row">
{{ $filters.bytes(process.memvirt) }}
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="row">
@ -331,7 +331,7 @@
:class="getMemoryPercentAlert(process)">
{{ process.memory_percent == -1 ? '?' : $filters.number(process.memory_percent, 1) }}
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="row">
<td v-show="!getDisableStats().includes('memory_info') && !getDisableVms()" scope="row">
{{ $filters.bytes(process.memvirt) }}
</td>
<td v-show="!getDisableStats().includes('memory_info')" scope="row">
@ -495,7 +495,7 @@ export default {
process.cmdline = process.cmdline.join(' ').replace(/\n/g, ' ');
}
if (process.cmdline === null || process.cmdline.length === 0) {
if (typeof process.cmdline !== "string" || process.cmdline.length === 0) {
process.cmdline = process.name;
}
@ -581,7 +581,7 @@ export default {
process.cmdline = process.cmdline.join(' ').replace(/\n/g, ' ');
}
if (process.cmdline === null || process.cmdline.length === 0) {
if (typeof process.cmdline !== "string" || process.cmdline.length === 0) {
process.cmdline = process.name;
}
return process
@ -595,6 +595,10 @@ export default {
getDisableStats() {
return GlancesHelper.getLimit('processlist', 'processlist_disable_stats') || [];
},
getDisableVms() {
const ret = GlancesHelper.getLimit('processlist', 'processlist_disable_virtual_memory') || ['False'];
return (ret[0].toLowerCase() === 'true') ? true : false;
},
setExtendedStats(pid) {
fetch('api/4/processes/extended/' + pid.toString(), { method: 'POST' })
.then((response) => response.json());

View File

@ -72,11 +72,11 @@ class GlancesStatsService {
const data = {
stats: response[0],
views: response[1],
isBsd: response[0]['system']['os_name'] === 'FreeBSD',
isLinux: response[0]['system']['os_name'] === 'Linux',
isSunOS: response[0]['system']['os_name'] === 'SunOS',
isMac: response[0]['system']['os_name'] === 'Darwin',
isWindows: response[0]['system']['os_name'] === 'Windows'
isBsd: response[0].system?.os_name === 'FreeBSD',
isLinux: response[0].system?.os_name === 'Linux',
isSunOS: response[0].system?.os_name === 'SunOS',
isMac: response[0].system?.os_name === 'Darwin',
isWindows: response[0].system?.os_name === 'Windows'
};
this.data = data;
store.data = data;

View File

@ -1,4 +1,13 @@
{
"topMenu": [
"quicklook",
"cpu",
"percpu",
"gpu",
"mem",
"memswap",
"load"
],
"leftMenu": [
"network",
"ports",

View File

@ -135,9 +135,9 @@
}
},
"node_modules/@eslint/config-array": {
"version": "0.20.0",
"resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.20.0.tgz",
"integrity": "sha512-fxlS1kkIjx8+vy2SjuCB94q3htSNrufYTXubwiBFeaQHbH6Ipi43gFJq2zCMt6PHhImH3Xmr0NksKDvchWlpQQ==",
"version": "0.21.0",
"resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz",
"integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
@ -150,9 +150,9 @@
}
},
"node_modules/@eslint/config-helpers": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.1.tgz",
"integrity": "sha512-RI17tsD2frtDu/3dmI7QRrD4bedNKPM08ziRYaC5AhkGrzIAJelm9kJU1TznK+apx6V+cqRz8tfpEeG3oIyjxw==",
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.0.tgz",
"integrity": "sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw==",
"dev": true,
"license": "Apache-2.0",
"engines": {
@ -160,9 +160,9 @@
}
},
"node_modules/@eslint/core": {
"version": "0.13.0",
"resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.13.0.tgz",
"integrity": "sha512-yfkgDw1KR66rkT5A8ci4irzDysN7FRpq3ttJolR88OqQikAWqwA8j5VZyas+vjyBNFIJ7MfybJ9plMILI2UrCw==",
"version": "0.15.1",
"resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.1.tgz",
"integrity": "sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
@ -210,13 +210,16 @@
}
},
"node_modules/@eslint/js": {
"version": "9.25.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.25.0.tgz",
"integrity": "sha512-iWhsUS8Wgxz9AXNfvfOPFSW4VfMXdVhp1hjkZVhXCrpgh/aLcc45rX6MPu+tIVUWDw0HfNwth7O28M1xDxNf9w==",
"version": "9.32.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.32.0.tgz",
"integrity": "sha512-BBpRFZK3eX6uMLKz8WxFOBIFFcGFJ/g8XuwjTHCqHROSIsopI+ddn/d5Cfh36+7+e5edVS8dbSHnBNhrLEX0zg==",
"dev": true,
"license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
"funding": {
"url": "https://eslint.org/donate"
}
},
"node_modules/@eslint/object-schema": {
@ -230,13 +233,13 @@
}
},
"node_modules/@eslint/plugin-kit": {
"version": "0.2.8",
"resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.2.8.tgz",
"integrity": "sha512-ZAoA40rNMPwSm+AeHpCq8STiNAwzWLJuP8Xv4CHIc9wv/PSuExjMrmjfYNj682vW0OOiZ1HKxzvjQr9XZIisQA==",
"version": "0.3.4",
"resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.4.tgz",
"integrity": "sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"@eslint/core": "^0.13.0",
"@eslint/core": "^0.15.1",
"levn": "^0.4.1"
},
"engines": {
@ -1590,9 +1593,9 @@
}
},
"node_modules/acorn": {
"version": "8.14.1",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.1.tgz",
"integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==",
"version": "8.15.0",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true,
"license": "MIT",
"bin": {
@ -2175,9 +2178,9 @@
}
},
"node_modules/compression": {
"version": "1.8.0",
"resolved": "https://registry.npmjs.org/compression/-/compression-1.8.0.tgz",
"integrity": "sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA==",
"version": "1.8.1",
"resolved": "https://registry.npmjs.org/compression/-/compression-1.8.1.tgz",
"integrity": "sha512-9mAqGPHLakhCLeNyxPkK4xVo746zQ/czLH1Ky+vkitMnWfWZps8r0qXuwhwizagCRttsL4lfG4pIOvaWLpAP0w==",
"dev": true,
"license": "MIT",
"dependencies": {
@ -2185,7 +2188,7 @@
"compressible": "~2.0.18",
"debug": "2.6.9",
"negotiator": "~0.6.4",
"on-headers": "~1.0.2",
"on-headers": "~1.1.0",
"safe-buffer": "5.2.1",
"vary": "~1.1.2"
},
@ -2825,20 +2828,20 @@
}
},
"node_modules/eslint": {
"version": "9.25.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-9.25.0.tgz",
"integrity": "sha512-MsBdObhM4cEwkzCiraDv7A6txFXEqtNXOb877TsSp2FCkBNl8JfVQrmiuDqC1IkejT6JLPzYBXx/xAiYhyzgGA==",
"version": "9.32.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-9.32.0.tgz",
"integrity": "sha512-LSehfdpgMeWcTZkWZVIJl+tkZ2nuSkyyB9C27MZqFWXuph7DvaowgcTvKqxvpLW1JZIk8PN7hFY3Rj9LQ7m7lg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.12.1",
"@eslint/config-array": "^0.20.0",
"@eslint/config-helpers": "^0.2.1",
"@eslint/core": "^0.13.0",
"@eslint/config-array": "^0.21.0",
"@eslint/config-helpers": "^0.3.0",
"@eslint/core": "^0.15.0",
"@eslint/eslintrc": "^3.3.1",
"@eslint/js": "9.25.0",
"@eslint/plugin-kit": "^0.2.8",
"@eslint/js": "9.32.0",
"@eslint/plugin-kit": "^0.3.4",
"@humanfs/node": "^0.16.6",
"@humanwhocodes/module-importer": "^1.0.1",
"@humanwhocodes/retry": "^0.4.2",
@ -2849,9 +2852,9 @@
"cross-spawn": "^7.0.6",
"debug": "^4.3.2",
"escape-string-regexp": "^4.0.0",
"eslint-scope": "^8.3.0",
"eslint-visitor-keys": "^4.2.0",
"espree": "^10.3.0",
"eslint-scope": "^8.4.0",
"eslint-visitor-keys": "^4.2.1",
"espree": "^10.4.0",
"esquery": "^1.5.0",
"esutils": "^2.0.2",
"fast-deep-equal": "^3.1.3",
@ -2921,9 +2924,9 @@
}
},
"node_modules/eslint-scope": {
"version": "8.3.0",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.3.0.tgz",
"integrity": "sha512-pUNxi75F8MJ/GdeKtVLSbYg4ZI34J6C0C7sbL4YOp2exGwen7ZsuBqKzUhXd0qMQ362yET3z+uPwKeg/0C2XCQ==",
"version": "8.4.0",
"resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz",
"integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
@ -2938,9 +2941,9 @@
}
},
"node_modules/eslint-visitor-keys": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz",
"integrity": "sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw==",
"version": "4.2.1",
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz",
"integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==",
"dev": true,
"license": "Apache-2.0",
"engines": {
@ -2951,15 +2954,15 @@
}
},
"node_modules/espree": {
"version": "10.3.0",
"resolved": "https://registry.npmjs.org/espree/-/espree-10.3.0.tgz",
"integrity": "sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg==",
"version": "10.4.0",
"resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz",
"integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==",
"dev": true,
"license": "BSD-2-Clause",
"dependencies": {
"acorn": "^8.14.0",
"acorn": "^8.15.0",
"acorn-jsx": "^5.3.2",
"eslint-visitor-keys": "^4.2.0"
"eslint-visitor-keys": "^4.2.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@ -4746,9 +4749,9 @@
}
},
"node_modules/on-headers": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
"integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz",
"integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==",
"dev": true,
"license": "MIT",
"engines": {

File diff suppressed because one or more lines are too long

View File

@ -15,7 +15,7 @@ Supported Cloud API:
import threading
from glances.globals import iteritems, to_ascii
from glances.globals import to_ascii
from glances.logger import logger
from glances.plugins.plugin.model import GlancesPluginModel
@ -51,18 +51,24 @@ class CloudPlugin(GlancesPluginModel):
# Init the stats
self.reset()
# Init thread to grab OpenStack stats asynchronously
self.OPENSTACK = ThreadOpenStack()
self.OPENSTACKEC2 = ThreadOpenStackEC2()
# Enable threads only if the plugin is enabled
self.OPENSTACK = None
self.OPENSTACKEC2 = None
if self.is_enabled():
# Init thread to grab OpenStack stats asynchronously
self.OPENSTACK = ThreadOpenStack()
self.OPENSTACKEC2 = ThreadOpenStackEC2()
# Run the thread
self.OPENSTACK.start()
self.OPENSTACKEC2.start()
# Run the thread
self.OPENSTACK.start()
self.OPENSTACKEC2.start()
def exit(self):
"""Overwrite the exit method to close threads."""
self.OPENSTACK.stop()
self.OPENSTACKEC2.stop()
if self.OPENSTACK:
self.OPENSTACK.stop()
if self.OPENSTACKEC2:
self.OPENSTACKEC2.stop()
# Call the father class
super().exit()
@ -81,7 +87,7 @@ class CloudPlugin(GlancesPluginModel):
return stats
# Update the stats
if self.input_method == 'local':
if self.input_method == 'local' and (self.OPENSTACK or self.OPENSTACKEC2):
stats = self.OPENSTACK.stats
if not stats:
stats = self.OPENSTACKEC2.stats
@ -159,7 +165,7 @@ class ThreadOpenStack(threading.Thread):
self.stop()
return False
for k, v in iteritems(self.OPENSTACK_API_METADATA):
for k, v in self.OPENSTACK_API_METADATA.items():
r_url = f'{self.OPENSTACK_API_URL}/{v}'
try:
# Local request, a timeout of 3 seconds is OK

View File

@ -79,6 +79,9 @@ fields_description = {
'description': 'Container network TX bitrate',
'unit': 'bitpersecond',
},
'ports': {
'description': 'Container ports',
},
'uptime': {
'description': 'Container uptime',
},
@ -392,6 +395,9 @@ class ContainersPlugin(GlancesPluginModel):
if 'networkio' not in self.disable_stats:
msgs.extend(['{:>7}'.format('Rx/s'), ' {:<7}'.format('Tx/s')])
if 'ports' not in self.disable_stats:
msgs.extend('{:16}'.format('Ports'))
if 'command' not in self.disable_stats:
msgs.append(' {:8}'.format('Command'))
@ -490,6 +496,15 @@ class ContainersPlugin(GlancesPluginModel):
return build_with_this_args
def build_ports(self, ret, container):
if container.get('ports', '') != '':
msg = '{:16}'.format(container['ports'])
else:
msg = '{:16}'.format('_')
ret.append(self.curse_add_line(msg, splittable=True))
return ret
def build_cmd_line(self, ret, container):
if container['command'] is not None:
msg = ' {}'.format(container['command'])
@ -539,9 +554,9 @@ class ContainersPlugin(GlancesPluginModel):
'mem': self.build_memory_line,
'diskio': self.build_io_line,
'networkio': self.build_net_line(args),
'ports': self.build_ports,
'command': self.build_cmd_line,
}
steps.extend(v for k, v in options.items() if k not in self.disable_stats)
return reduce(lambda ret, step: step(ret, container), steps, ret)
@ -579,3 +594,5 @@ def sort_docker_stats(stats: list[dict[str, Any]]) -> tuple[str, list[dict[str,
# Return the main sort key and the sorted stats
return sort_by, stats
# Return the main sort key and the sorted stats
return sort_by, stats

View File

@ -312,6 +312,7 @@ class DockerExtension:
'memory_percent': None,
'network_rx': None,
'network_tx': None,
'ports': '',
'uptime': None,
}
@ -358,4 +359,13 @@ class DockerExtension:
# Manage special chars in command (see issue#2733)
stats['command'] = replace_special_chars(' '.join(stats['command']))
# Manage ports (see issue#2054)
if hasattr(container, 'ports'):
stats['ports'] = ','.join(
[
f'{container.ports[cp][0]["HostPort"]}->{cp}' if container.ports[cp] else f'{cp}'
for cp in container.ports
]
)
return stats

View File

@ -371,6 +371,7 @@ class PodmanExtension:
'memory_percent': None,
'network_rx': None,
'network_tx': None,
'ports': '',
'uptime': None,
}
@ -403,4 +404,13 @@ class PodmanExtension:
# Manage special chars in command (see issue#2733)
stats['command'] = replace_special_chars(' '.join(stats['command']))
# Manage ports (see issue#2054)
if hasattr(container, 'ports'):
stats['ports'] = ','.join(
[
f'{container.ports[cp][0]["HostPort"]}->{cp}' if container.ports[cp] else f'{cp}'
for cp in container.ports
]
)
return stats

View File

@ -11,7 +11,7 @@
import psutil
from glances.cpu_percent import cpu_percent
from glances.globals import LINUX, SUNOS, WINDOWS, iterkeys
from glances.globals import LINUX, SUNOS, WINDOWS
from glances.plugins.core import CorePlugin
from glances.plugins.plugin.model import GlancesPluginModel
@ -271,7 +271,7 @@ class CpuPlugin(GlancesPluginModel):
return self.stats
# Convert SNMP stats to float
for key in iterkeys(stats):
for key in stats:
stats[key] = float(stats[key])
stats['total'] = 100 - stats['idle']

View File

@ -42,6 +42,24 @@ fields_description = {
'rate': True,
'unit': 'byte',
},
'read_time': {
'description': 'Time spent reading.',
'rate': True,
'unit': 'millisecond',
},
'write_time': {
'description': 'Time spent writing.',
'rate': True,
'unit': 'millisecond',
},
'read_latency': {
'description': 'Mean time spent reading per operation.',
'unit': 'millisecond',
},
'write_latency': {
'description': 'Mean time spent writing per operation.',
'unit': 'millisecond',
},
}
# Define the history items list
@ -94,6 +112,9 @@ class DiskioPlugin(GlancesPluginModel):
# Update the stats
if self.input_method == 'local':
stats = self.update_local()
# Compute latency (need rate stats, so should be done after decorator)
stats = self.update_latency(stats)
else:
stats = self.get_init_value()
@ -102,6 +123,23 @@ class DiskioPlugin(GlancesPluginModel):
return self.stats
def update_latency(self, stats):
"""Update the latency stats."""
# Compute read/write latency if we have the rate stats
for stat in stats:
# Compute read/write latency if we have the rate stats
if stat.get("read_count_rate_per_sec", 0) > 0:
stat["read_latency"] = int(stat["read_time_rate_per_sec"] / stat["read_count_rate_per_sec"])
else:
stat["read_latency"] = 0
if stat.get("write_count_rate_per_sec", 0) > 0:
stat["write_latency"] = int(stat["write_time_rate_per_sec"] / stat["write_count_rate_per_sec"])
else:
stat["write_latency"] = 0
return stats
@GlancesPluginModel._manage_rate
def update_local(self):
stats = self.get_init_value()
@ -143,22 +181,31 @@ class DiskioPlugin(GlancesPluginModel):
# Call the father's method
super().update_views()
# Add specifics information
# Alert
for i in self.get_raw():
disk_real_name = i['disk_name']
# Skip alert if no timespan to measure
if not i.get('read_bytes_rate_per_sec') or not i.get('write_bytes_rate_per_sec'):
continue
# # Skip alert if no timespan to measure
# if not i.get('read_bytes_rate_per_sec') or not i.get('write_bytes_rate_per_sec'):
# continue
# Decorate the bitrate with the configuration file
alert_rx = self.get_alert(i['read_bytes'], header=disk_real_name + '_rx')
alert_tx = self.get_alert(i['write_bytes'], header=disk_real_name + '_tx')
self.views[i[self.get_key()]]['read_bytes']['decoration'] = alert_rx
self.views[i[self.get_key()]]['read_bytes_rate_per_sec']['decoration'] = alert_rx
self.views[i[self.get_key()]]['write_bytes']['decoration'] = alert_tx
self.views[i[self.get_key()]]['write_bytes_rate_per_sec']['decoration'] = alert_tx
# Decorate the latency with the configuration file
# Try to get the read/write latency for the current disk
alert_latency_rx = self.get_alert(i['read_latency'], header=disk_real_name + '_rx_latency')
alert_latency_tx = self.get_alert(i['write_latency'], header=disk_real_name + '_tx_latency')
# If the alert is not defined, use the default one
if alert_latency_rx == 'DEFAULT':
alert_latency_rx = self.get_alert(i['read_latency'], header='rx_latency')
if alert_latency_tx == 'DEFAULT':
alert_latency_tx = self.get_alert(i['write_latency'], header='tx_latency')
self.views[i[self.get_key()]]['read_latency']['decoration'] = alert_latency_rx
self.views[i[self.get_key()]]['write_latency']['decoration'] = alert_latency_tx
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
@ -185,6 +232,11 @@ class DiskioPlugin(GlancesPluginModel):
ret.append(self.curse_add_line(msg))
msg = '{:>7}'.format('IOW/s')
ret.append(self.curse_add_line(msg))
elif args.diskio_latency:
msg = '{:>8}'.format('ms/opR')
ret.append(self.curse_add_line(msg))
msg = '{:>7}'.format('ms/opW')
ret.append(self.curse_add_line(msg))
else:
msg = '{:>8}'.format('R/s')
ret.append(self.curse_add_line(msg))
@ -220,6 +272,22 @@ class DiskioPlugin(GlancesPluginModel):
msg, self.get_views(item=i[self.get_key()], key='write_count', option='decoration')
)
)
elif args.diskio_latency:
# latency (mean time spent reading/writing per operation)
txps = self.auto_unit(i.get('read_latency', None), low_precision=True)
rxps = self.auto_unit(i.get('write_latency', None), low_precision=True)
msg = f'{txps:>7}'
ret.append(
self.curse_add_line(
msg, self.get_views(item=i[self.get_key()], key='read_latency', option='decoration')
)
)
msg = f'{rxps:>7}'
ret.append(
self.curse_add_line(
msg, self.get_views(item=i[self.get_key()], key='write_latency', option='decoration')
)
)
else:
# Bitrate
txps = self.auto_unit(i.get('read_bytes_rate_per_sec', None))

View File

@ -12,7 +12,7 @@ import operator
import psutil
from glances.globals import PermissionError, nativestr, u
from glances.globals import PermissionError, exit_after, nativestr, u
from glances.logger import logger
from glances.plugins.plugin.model import GlancesPluginModel
@ -88,6 +88,17 @@ snmp_oid['esxi'] = snmp_oid['windows']
items_history_list = [{'name': 'percent', 'description': 'File system usage in percent', 'y_unit': '%'}]
@exit_after(2, default=None)
def get_disk_usage(fs):
"""Return all partitions."""
try:
return psutil.disk_usage(fs.mountpoint)
except OSError:
# Disk is ejected during the command
logger.debug("Plugin - fs: PsUtil fetch failed")
return None
class FsPlugin(GlancesPluginModel):
"""Glances file system plugin.
@ -126,53 +137,50 @@ class FsPlugin(GlancesPluginModel):
return self.stats
def get_disk_partitions(self, *, fetch_all: bool = False):
"""Return all partitions."""
try:
# Grab the stats using the psutil disk_partitions
# If fetch_all is False, then returns physical devices only (e.g. hard disks, cd-rom drives, USB keys)
# and ignore all others (e.g. memory partitions such as /dev/shm)
# Else return all mount points (including logical mount points like NFS, tmpfs, shm, ...)
return psutil.disk_partitions(all=fetch_all)
except (UnicodeDecodeError, PermissionError):
logger.debug("Plugin - fs: PsUtil fetch failed")
return []
def update_local(self):
"""Update the FS stats using the input method."""
# Init new stats
stats = self.get_init_value()
# Update stats using the standard system lib
# Grab the stats using the psutil disk_partitions
# If 'all'=False return physical devices only (e.g. hard disks, cd-rom drives, USB keys)
# and ignore all others (e.g. memory partitions such as /dev/shm)
try:
fs_stat = psutil.disk_partitions(all=False)
except (UnicodeDecodeError, PermissionError):
logger.debug("Plugin - fs: PsUtil fetch failed")
return stats
fs_stat = self.get_disk_partitions()
# Optional hack to allow logical mounts points (issue #448)
allowed_fs_types = self.get_conf_value('allow')
if allowed_fs_types:
# Avoid Psutil call unless mounts need to be allowed
try:
all_mounted_fs = psutil.disk_partitions(all=True)
except (UnicodeDecodeError, PermissionError):
logger.debug("Plugin - fs: PsUtil extended fetch failed")
else:
# Discard duplicates (#2299) and add entries matching allowed fs types
tracked_mnt_points = {f.mountpoint for f in fs_stat}
for f in all_mounted_fs:
if (
any(f.fstype.find(fs_type) >= 0 for fs_type in allowed_fs_types)
and f.mountpoint not in tracked_mnt_points
):
fs_stat.append(f)
all_mounted_fs = self.get_disk_partitions(fetch_all=True)
# Discard duplicates (#2299) and add entries matching allowed fs types
tracked_mnt_points = {f.mountpoint for f in fs_stat}
for f in all_mounted_fs:
if (
any(f.fstype.find(fs_type) >= 0 for fs_type in allowed_fs_types)
and f.mountpoint not in tracked_mnt_points
):
fs_stat.append(f)
# Loop over fs
for fs in fs_stat:
# Hide the stats if the mount point is in the exclude list
# # It avoids unnecessary call to PsUtil disk_usage
# It avoids unnecessary call to PsUtil disk_usage
if not self.is_display_any(fs.mountpoint, fs.device):
continue
# Grab the disk usage
try:
fs_usage = psutil.disk_usage(fs.mountpoint)
except OSError:
# Correct issue #346
# Disk is ejected during the command
fs_usage = get_disk_usage(fs)
if fs_usage is None:
continue
fs_current = {
'device_name': fs.device,

34
glances/plugins/fs/zfs.py Normal file
View File

@ -0,0 +1,34 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
# For the moment, thoses functions are only used in the MEM plugin (see #3979)
import os
from glances.logger import logger
def zfs_enable(zfs_stats_path='/proc/spl/kstat/zfs'):
"""Check if ZFS is enabled on this system."""
return os.path.isdir(zfs_stats_path)
def zfs_stats(zfs_stats_files=['/proc/spl/kstat/zfs/arcstats']):
"""Get ZFS stats from /proc/spl/kstat/zfs files."""
stats = {}
for zfs_stats_file in zfs_stats_files:
try:
with open(zfs_stats_file) as f:
lines = f.readlines()
namespace = os.path.basename(zfs_stats_file)
for line in lines[2:]: # Skip the first two header lines
parts = line.split()
stats[namespace + '.' + parts[0]] = int(parts[2])
except Exception as e:
logger.error(f"Error reading ZFS stats in {zfs_stats_file}: {e}")
return stats

View File

@ -15,7 +15,6 @@ Just a stupid plugin to display the help screen.
from itertools import chain
from glances import __version__, psutil_version
from glances.globals import iteritems
from glances.plugins.plugin.model import GlancesPluginModel
@ -172,7 +171,7 @@ class HelpPlugin(GlancesPluginModel):
#
shortcuts = []
collecting = False
for k, v in iteritems(self.view_data):
for k, v in self.view_data.items():
if collecting:
pass
elif k == 'header_sort':

View File

@ -19,18 +19,18 @@ from glances.timer import Timer, getTimeSinceLastUpdate
try:
import netifaces
except ImportError as e:
import_error_tag = True
netifaces_tag = True
logger.warning(f"Missing Python Lib ({e}), IP plugin is disabled")
else:
import_error_tag = False
netifaces_tag = False
try:
netifaces.default_gateway()
except Exception:
import_error_tag = True
netifaces_tag = True
logger.warning("Netifaces2 should be installed in your Python environment, IP plugin is disabled")
else:
import_error_tag = False
netifaces_tag = False
# Fields description
@ -150,7 +150,7 @@ class IpPlugin(GlancesPluginModel):
# Init new stats
stats = self.get_init_value()
if self.input_method == 'local' and not import_error_tag:
if self.input_method == 'local' and not netifaces_tag:
stats = self.get_stats_for_local_input(stats)
elif self.input_method == 'snmp':
@ -178,13 +178,9 @@ class IpPlugin(GlancesPluginModel):
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or self.is_disabled() or import_error_tag:
if not self.stats or self.is_disabled() or netifaces_tag:
return ret
# Build the string message
msg = ' - '
ret.append(self.curse_add_line(msg, optional=True))
# Start with the private IP information
if 'address' in self.stats:
msg = 'IP '

View File

@ -12,7 +12,6 @@ import os
import psutil
from glances.globals import iteritems
from glances.logger import logger
from glances.plugins.core import CorePlugin
from glances.plugins.plugin.model import GlancesPluginModel
@ -114,7 +113,7 @@ class LoadPlugin(GlancesPluginModel):
# Python 3 return a dict like:
# {'min1': "b'0.08'", 'min5': "b'0.12'", 'min15': "b'0.15'"}
for k, v in iteritems(stats):
for k, v in stats.items():
stats[k] = float(v)
stats['cpucore'] = get_nb_log_core()

View File

@ -1,7 +1,7 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <nicolas@nicolargo.com>
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
@ -10,6 +10,7 @@
import psutil
from glances.plugins.fs.zfs import zfs_enable, zfs_stats
from glances.plugins.plugin.model import GlancesPluginModel
# Fields description
@ -22,6 +23,7 @@ different memory values depending on the platform (e.g. free + buffers + cached
and it is supposed to be used to monitor actual memory usage in a cross platform fashion.',
'unit': 'bytes',
'min_symbol': 'K',
'short_name': 'avail',
},
'percent': {
'description': 'The percentage usage calculated as (total - available) / total * 100.',
@ -60,7 +62,7 @@ note that this doesn\'t reflect the actual memory available (use \'available\' i
'optional': True,
},
'cached': {
'description': '*(Linux, BSD)*: cache for various things.',
'description': '*(Linux, BSD)*: cache for various things (including ZFS cache).',
'unit': 'bytes',
'min_symbol': 'K',
'optional': True,
@ -124,6 +126,12 @@ class MemPlugin(GlancesPluginModel):
args=args, config=config, items_history_list=items_history_list, fields_description=fields_description
)
# Should we display available memory instead of used memory ?
self.available = self.get_conf_value('available', default=['False'])[0].lower() == 'true'
# ZFS
self.zfs_enabled = zfs_enable()
# We want to display the stat in the curse interface
self.display_curse = True
@ -167,15 +175,40 @@ class MemPlugin(GlancesPluginModel):
if hasattr(vm_stats, mem):
stats[mem] = getattr(vm_stats, mem)
# Use the 'free'/htop calculation
# free=available+buffer+cached
stats['free'] = stats['available']
if hasattr(stats, 'buffers'):
stats['free'] += stats['buffers']
if hasattr(stats, 'cached'):
stats['free'] += stats['cached']
# used=total-free
stats['used'] = stats['total'] - stats['free']
# Manage ZFS cache (see #3979 for details)
if self.zfs_enabled:
zfs_size = 0
zfs_shrink = 0
zfs_cache_stats = zfs_stats()
# Uncomment the following line to use the test data
# zfs_cache_stats = zfs_stats(['./tests-data/plugins/fs/zfs/arcstats'])
if 'arcstats.size' in zfs_cache_stats:
zfs_size = zfs_cache_stats['arcstats.size']
if 'arcstats.c_min' in zfs_cache_stats:
zfs_cmin = zfs_cache_stats['arcstats.c_min']
else:
zfs_cmin = 0
zfs_shrink = zfs_size - zfs_cmin
# Add the ZFS cache to the 'cached' memory
if 'cached' in stats:
stats['cached'] += zfs_size
else:
stats['cached'] = zfs_size
# Add the amount ZFS cache can shrink to 'available' memory
if 'available' in stats:
stats['available'] += zfs_shrink
else:
stats['available'] = zfs_shrink
# Subtract the amount ZFS cache can shrink from 'used' memory
stats['used'] -= zfs_shrink
# Update percent to reflect new 'available' value
stats['percent'] = round(float((stats['total'] - stats['available']) / stats['total'] * 100), 1)
stats['used'] = stats['total'] - stats['available']
return stats
@ -279,7 +312,10 @@ class MemPlugin(GlancesPluginModel):
# used + buffers
ret.append(self.curse_new_line())
# Used memory usage
ret.extend(self.curse_add_stat('used', width=15))
if self.available:
ret.extend(self.curse_add_stat('available', width=15))
else:
ret.extend(self.curse_add_stat('used', width=15))
# Buffers memory usage
ret.extend(self.curse_add_stat('buffers', width=16, header=' '))

View File

@ -10,7 +10,6 @@
import psutil
from glances.globals import iterkeys
from glances.plugins.plugin.model import GlancesPluginModel
from glances.timer import getTimeSinceLastUpdate
@ -125,7 +124,7 @@ class MemswapPlugin(GlancesPluginModel):
self.reset()
return stats
for key in iterkeys(stats):
for key in stats:
if stats[key] != '':
stats[key] = float(stats[key]) * 1024

View File

@ -0,0 +1,68 @@
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2025 Nicolas Hennion <nicolas@nicolargo.com>
#
# SPDX-License-Identifier: LGPL-3.0-only
#
# Glances DAG (direct acyclic graph) for plugins dependencies.
# It allows to define DAG dependencies between plugins
# For the moment, it will be used only for Restful API interface
_plugins_graph = {
'*': ['alert'], # All plugins depend on alert plugin
'cpu': ['core'],
'load': ['core'],
'processlist': ['core', 'processcount'],
'programlist': ['processcount'],
'quicklook': ['fs', 'load'],
'vms': ['processcount'],
}
def get_plugin_dependencies(plugin_name, _graph=_plugins_graph):
"""Return all transitive dependencies for a given plugin (including global ones)."""
seen = set()
def _resolve(plugin):
if plugin in seen:
return
seen.add(plugin)
# Get direct dependencies of this plugin
deps = _graph.get(plugin, [])
for dep in deps:
_resolve(dep)
# Resolve dependencies for this plugin
_resolve(plugin_name)
# Add global ("*") dependencies
for dep in _graph.get('*', []):
_resolve(dep)
# Remove the plugin itself if present
seen.discard(plugin_name)
# Preserve order of discovery (optional, for deterministic results)
result = []
added = set()
for dep in _graph.get(plugin_name, []) + _graph.get('*', []):
for d in _dfs_order(dep, _graph, set()):
if d not in added and d != plugin_name:
result.append(d)
added.add(d)
return [plugin_name] + result
def _dfs_order(plugin, graph, seen):
"""Helper to preserve depth-first order."""
if plugin in seen:
return []
seen.add(plugin)
order = []
for dep in graph.get(plugin, []):
order.extend(_dfs_order(dep, graph, seen))
order.append(plugin)
return order

View File

@ -17,7 +17,16 @@ import re
from glances.actions import GlancesActions
from glances.events_list import glances_events
from glances.globals import dictlist, dictlist_json_dumps, iterkeys, itervalues, json_dumps, listkeys, mean, nativestr
from glances.globals import (
auto_unit,
dictlist,
dictlist_json_dumps,
json_dumps,
list_to_dict,
listkeys,
mean,
nativestr,
)
from glances.history import GlancesHistory
from glances.logger import logger
from glances.outputs.glances_unicode import unicode_message
@ -128,14 +137,43 @@ class GlancesPluginModel:
self.stats_previous = None
self.reset()
def __repr__(self):
"""Return the raw stats."""
return str(self.stats)
def __str__(self):
"""Return the human-readable stats."""
return str(self.stats)
def __repr__(self):
"""Return the raw stats."""
if isinstance(self.stats, list):
return str(list_to_dict(self.stats))
return str(self.stats)
def __getitem__(self, item):
"""Return the stats item."""
if isinstance(self.stats, dict) and item in self.stats:
return self.stats[item]
if isinstance(self.stats, list):
ltd = list_to_dict(self.stats)
if item in ltd:
return ltd[item]
raise KeyError(f"'{self.__class__.__name__}' object has no key '{item}'")
def keys(self):
"""Return the keys of the stats."""
if isinstance(self.stats, dict):
return listkeys(self.stats)
if isinstance(self.stats, list):
return listkeys(list_to_dict(self.stats))
return []
def get(self, item, default=None):
"""Return the stats item or default if not found."""
try:
return self[item]
except KeyError:
return default
def get_init_value(self):
"""Return a copy of the init value."""
return copy.copy(self.stats_init_value)
@ -188,6 +226,9 @@ class GlancesPluginModel:
def update_stats_history(self):
"""Update stats history."""
# Exit if no history
if not self.history_enable():
return
# Build the history
_get_export = self.get_export()
if not (_get_export and self.history_enable()):
@ -331,16 +372,14 @@ class GlancesPluginModel:
ret = {}
if bulk:
# Bulk request
snmp_result = snmp_client.getbulk_by_oid(0, 10, *list(itervalues(snmp_oid)))
snmp_result = snmp_client.getbulk_by_oid(0, 10, *list(snmp_oid.values()))
logger.info(snmp_result)
if len(snmp_oid) == 1:
# Bulk command for only one OID
# Note: key is the item indexed but the OID result
for item in snmp_result:
if iterkeys(item)[0].startswith(itervalues(snmp_oid)[0]):
ret[iterkeys(snmp_oid)[0] + iterkeys(item)[0].split(itervalues(snmp_oid)[0])[1]] = itervalues(
item
)[0]
if item.keys()[0].startswith(snmp_oid.values()[0]):
ret[snmp_oid.keys()[0] + item.keys()[0].split(snmp_oid.values()[0])[1]] = item.values()[0]
else:
# Build the internal dict with the SNMP result
# Note: key is the first item in the snmp_oid
@ -348,7 +387,7 @@ class GlancesPluginModel:
for item in snmp_result:
item_stats = {}
item_key = None
for key in iterkeys(snmp_oid):
for key in snmp_oid:
oid = snmp_oid[key] + '.' + str(index)
if oid in item:
if item_key is None:
@ -360,10 +399,10 @@ class GlancesPluginModel:
index += 1
else:
# Simple get request
snmp_result = snmp_client.get_by_oid(*list(itervalues(snmp_oid)))
snmp_result = snmp_client.get_by_oid(*list(snmp_oid.values()))
# Build the internal dict with the SNMP result
for key in iterkeys(snmp_oid):
for key in snmp_oid:
ret[key] = snmp_result[snmp_oid[key]]
return ret
@ -704,7 +743,7 @@ class GlancesPluginModel:
# Manage log
log_str = ""
if self.get_limit_log(stat_name=stat_name, default_action=log):
if self.get_limit_log(stat_name=stat_name, default_action=log) and ret != 'DEFAULT':
# Add _LOG to the return string
# So stats will be highlighted with a specific color
log_str = "_LOG"
@ -821,7 +860,7 @@ class GlancesPluginModel:
return self._limits[self.plugin_name + '_log'][0].lower() == 'true'
return default_action
def get_conf_value(self, value, header="", plugin_name=None, default=[]):
def get_conf_value(self, value, header="", plugin_name=None, convert_bool=False, default=[]):
"""Return the configuration (header_) value for the current plugin.
...or the one given by the plugin_name var.
@ -835,7 +874,8 @@ class GlancesPluginModel:
plugin_name = plugin_name + '_' + header
try:
return self._limits[plugin_name + '_' + value]
ret = self._limits[plugin_name + '_' + value]
return bool(ret[0]) if convert_bool else ret
except KeyError:
return default
@ -1058,59 +1098,8 @@ class GlancesPluginModel:
self._align = value
def auto_unit(self, number, low_precision=False, min_symbol='K', none_symbol='-'):
"""Make a nice human-readable string out of number.
Number of decimal places increases as quantity approaches 1.
CASE: 613421788 RESULT: 585M low_precision: 585M
CASE: 5307033647 RESULT: 4.94G low_precision: 4.9G
CASE: 44968414685 RESULT: 41.9G low_precision: 41.9G
CASE: 838471403472 RESULT: 781G low_precision: 781G
CASE: 9683209690677 RESULT: 8.81T low_precision: 8.8T
CASE: 1073741824 RESULT: 1024M low_precision: 1024M
CASE: 1181116006 RESULT: 1.10G low_precision: 1.1G
:low_precision: returns less decimal places potentially (default is False)
sacrificing precision for more readability.
:min_symbol: Do not approach if number < min_symbol (default is K)
:decimal_count: if set, force the number of decimal number (default is None)
"""
if number is None:
return none_symbol
symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
if min_symbol in symbols:
symbols = symbols[symbols.index(min_symbol) :]
prefix = {
'Y': 1208925819614629174706176,
'Z': 1180591620717411303424,
'E': 1152921504606846976,
'P': 1125899906842624,
'T': 1099511627776,
'G': 1073741824,
'M': 1048576,
'K': 1024,
}
if number == 0:
# Avoid 0.0
return '0'
for symbol in reversed(symbols):
value = float(number) / prefix[symbol]
if value > 1:
decimal_precision = 0
if value < 10:
decimal_precision = 2
elif value < 100:
decimal_precision = 1
if low_precision:
if symbol in 'MK':
decimal_precision = 0
else:
decimal_precision = min(1, decimal_precision)
elif symbol in 'K':
decimal_precision = 0
return '{:.{decimal}f}{symbol}'.format(value, decimal=decimal_precision, symbol=symbol)
return f'{number!s}'
"""Return a nice human-readable string out of number."""
return auto_unit(number, low_precision=low_precision, min_symbol=min_symbol, none_symbol=none_symbol)
def trend_msg(self, trend, significant=1):
"""Return the trend message.

View File

@ -47,7 +47,8 @@ fields_description = {
'unit': 'number',
},
'cpu_percent': {
'description': 'Process CPU consumption',
'description': 'Process CPU consumption \
(returned value can be > 100.0 in case of a process running multiple threads on different CPU cores)',
'unit': 'percent',
},
'memory_percent': {
@ -291,7 +292,7 @@ class ProcesslistPlugin(GlancesPluginModel):
msg = cpu_layout.format(p['cpu_percent'])
alert = self.get_alert(
p['cpu_percent'],
highlight_zero=False,
highlight_zero=True,
is_max=(p['cpu_percent'] == self.max_values['cpu_percent']),
header="cpu",
)
@ -307,7 +308,7 @@ class ProcesslistPlugin(GlancesPluginModel):
msg = self.layout_stat['mem'].format(p['memory_percent'])
alert = self.get_alert(
p['memory_percent'],
highlight_zero=False,
highlight_zero=True,
is_max=(p['memory_percent'] == self.max_values['memory_percent']),
header="mem",
)
@ -338,10 +339,11 @@ class ProcesslistPlugin(GlancesPluginModel):
return ret
def _get_process_curses_memory_info(self, p, selected, args):
return [
self._get_process_curses_vms(p, selected, args),
self._get_process_curses_rss(p, selected, args),
]
ret = []
if not self.get_conf_value('disable_virtual_memory', convert_bool=True, default=False):
ret.append(self._get_process_curses_vms(p, selected, args))
ret.append(self._get_process_curses_rss(p, selected, args))
return ret
def _get_process_curses_pid(self, p, selected, args):
"""Return process PID curses"""
@ -474,7 +476,8 @@ class ProcesslistPlugin(GlancesPluginModel):
msg = self.layout_stat['command'].format(cmd)
ret.append(self.curse_add_line(msg, decoration=process_decoration, splittable=True))
if arguments:
msg = ' ' + self.layout_stat['command'].format(arguments)
msg = ' ' if args.cursor_process_name_position == 0 else unicode_message('THREE_DOTS')
msg += self.layout_stat['command'].format(arguments[args.cursor_process_name_position :])
ret.append(self.curse_add_line(msg, splittable=True))
else:
msg = self.layout_stat['name'].format(bare_process_name)
@ -596,7 +599,7 @@ class ProcesslistPlugin(GlancesPluginModel):
def add_title_line(self, ret, prog):
ret.append(self.curse_add_line("Pinned thread ", "TITLE"))
ret.append(self.curse_add_line(prog['name'], "UNDERLINE"))
ret.append(self.curse_add_line(prog.get('name', ''), "UNDERLINE"))
ret.append(self.curse_add_line(" ('e' to unpin)"))
return ret
@ -604,7 +607,9 @@ class ProcesslistPlugin(GlancesPluginModel):
def add_cpu_line(self, ret, prog):
ret.append(self.curse_new_line())
ret.append(self.curse_add_line(' CPU Min/Max/Mean: '))
msg = '{: >7.1f}{: >7.1f}{: >7.1f}%'.format(prog['cpu_min'], prog['cpu_max'], prog['cpu_mean'])
msg = '{: >7.1f}{: >7.1f}{: >7.1f}%'.format(
prog.get('cpu_min', 0), prog.get('cpu_max', 0), prog.get('cpu_mean', 0)
)
ret.append(self.curse_add_line(msg, decoration='INFO'))
return ret
@ -612,7 +617,7 @@ class ProcesslistPlugin(GlancesPluginModel):
def maybe_add_cpu_affinity_line(self, ret, prog):
if 'cpu_affinity' in prog and prog['cpu_affinity'] is not None:
ret.append(self.curse_add_line(' Affinity: '))
ret.append(self.curse_add_line(str(len(prog['cpu_affinity'])), decoration='INFO'))
ret.append(self.curse_add_line(str(len(prog.get('cpu_affinity', []))), decoration='INFO'))
ret.append(self.curse_add_line(' cores', decoration='INFO'))
return ret
@ -655,7 +660,7 @@ class ProcesslistPlugin(GlancesPluginModel):
if 'memory_swap' in prog and prog['memory_swap'] is not None:
ret.append(
self.curse_add_line(
self.auto_unit(prog['memory_swap'], low_precision=False), decoration='INFO', splittable=True
self.auto_unit(prog.get('memory_swap', 0), low_precision=False), decoration='INFO', splittable=True
)
)
ret.append(self.curse_add_line(' swap ', splittable=True))
@ -678,7 +683,9 @@ class ProcesslistPlugin(GlancesPluginModel):
def add_memory_line(self, ret, prog):
ret.append(self.curse_new_line())
ret.append(self.curse_add_line(' MEM Min/Max/Mean: '))
msg = '{: >7.1f}{: >7.1f}{: >7.1f}%'.format(prog['memory_min'], prog['memory_max'], prog['memory_mean'])
msg = '{: >7.1f}{: >7.1f}{: >7.1f}%'.format(
prog.get('memory_min', 0), prog.get('memory_max', 0), prog.get('memory_mean', 0)
)
ret.append(self.curse_add_line(msg, decoration='INFO'))
if 'memory_info' in prog and prog['memory_info'] is not None:
ret.append(self.curse_add_line(' Memory info: '))
@ -692,7 +699,7 @@ class ProcesslistPlugin(GlancesPluginModel):
ret.append(self.curse_add_line(' Open: '))
for stat_prefix in ['num_threads', 'num_fds', 'num_handles', 'tcp', 'udp']:
if stat_prefix in prog and prog[stat_prefix] is not None:
ret.append(self.curse_add_line(str(prog[stat_prefix]), decoration='INFO'))
ret.append(self.curse_add_line(str(prog.get(stat_prefix, 0)), decoration='INFO'))
ret.append(self.curse_add_line(' {} '.format(stat_prefix.replace('num_', ''))))
return ret
@ -735,8 +742,9 @@ class ProcesslistPlugin(GlancesPluginModel):
msg = self.layout_header['mem'].format('MEM%')
ret.append(self.curse_add_line(msg, sort_style if process_sort_key == 'memory_percent' else 'DEFAULT'))
if 'memory_info' in display_stats:
msg = self.layout_header['virt'].format('VIRT')
ret.append(self.curse_add_line(msg, optional=True))
if not self.get_conf_value('disable_virtual_memory', convert_bool=True, default=False):
msg = self.layout_header['virt'].format('VIRT')
ret.append(self.curse_add_line(msg, optional=True))
msg = self.layout_header['res'].format('RES')
ret.append(self.curse_add_line(msg, optional=True))
if 'pid' in display_stats:
@ -903,7 +911,7 @@ class ProcesslistPlugin(GlancesPluginModel):
continue
if sub_key is None:
ret += p[key]
else:
elif sub_key in p[key]:
ret += p[key][sub_key]
# Manage Min/Max/Mean

Some files were not shown because too many files have changed in this diff Show More